diff --git a/models/migrations/v1_10/v100.go b/models/migrations/v1_10/v100.go index e94024f4df43b..170a30631f888 100644 --- a/models/migrations/v1_10/v100.go +++ b/models/migrations/v1_10/v100.go @@ -4,79 +4,35 @@ package v1_10 //nolint import ( - "net/url" - "strings" - "time" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func UpdateMigrationServiceTypes(x *xorm.Engine) error { - type Repository struct { - ID int64 - OriginalServiceType int `xorm:"index default(0)"` - OriginalURL string `xorm:"VARCHAR(2048)"` - } - - if err := x.Sync2(new(Repository)); err != nil { - return err - } - - var last int - const batchSize = 50 - for { - results := make([]Repository, 0, batchSize) - err := x.Where("original_url <> '' AND original_url IS NOT NULL"). - And("original_service_type = 0 OR original_service_type IS NULL"). - OrderBy("id"). - Limit(batchSize, last). - Find(&results) - if err != nil { - return err - } - if len(results) == 0 { - break - } - last += len(results) - - const PlainGitService = 1 // 1 plain git service - const GithubService = 2 // 2 github.com - - for _, res := range results { - u, err := url.Parse(res.OriginalURL) - if err != nil { - return err - } - serviceType := PlainGitService - if strings.EqualFold(u.Host, "github.com") { - serviceType = GithubService - } - _, err = x.Exec("UPDATE repository SET original_service_type = ? WHERE id = ?", serviceType, res.ID) - if err != nil { - return err - } - } +func AddTaskTable(x *xorm.Engine) error { + // TaskType defines task type + type TaskType int + + // TaskStatus defines task status + type TaskStatus int + + type Task struct { + ID int64 + DoerID int64 `xorm:"index"` // operator + OwnerID int64 `xorm:"index"` // repo owner id, when creating, the repoID maybe zero + RepoID int64 `xorm:"index"` + Type TaskType + Status TaskStatus `xorm:"index"` + StartTime timeutil.TimeStamp + EndTime timeutil.TimeStamp + PayloadContent string `xorm:"TEXT"` + Errors string `xorm:"TEXT"` // if task failed, saved the error reason + Created timeutil.TimeStamp `xorm:"created"` } - type ExternalLoginUser struct { - ExternalID string `xorm:"pk NOT NULL"` - UserID int64 `xorm:"INDEX NOT NULL"` - LoginSourceID int64 `xorm:"pk NOT NULL"` - RawData map[string]any `xorm:"TEXT JSON"` - Provider string `xorm:"index VARCHAR(25)"` - Email string - Name string - FirstName string - LastName string - NickName string - Description string - AvatarURL string - Location string - AccessToken string - AccessTokenSecret string - RefreshToken string - ExpiresAt time.Time + type Repository struct { + Status int `xorm:"NOT NULL DEFAULT 0"` } - return x.Sync2(new(ExternalLoginUser)) + return x.Sync2(new(Task), new(Repository)) } diff --git a/models/migrations/v1_10/v101.go b/models/migrations/v1_10/v101.go index 79b419e9d97cc..e94024f4df43b 100644 --- a/models/migrations/v1_10/v101.go +++ b/models/migrations/v1_10/v101.go @@ -4,14 +4,78 @@ package v1_10 //nolint import ( + "net/url" + "strings" + "time" + "xorm.io/xorm" ) -func ChangeSomeColumnsLengthOfExternalLoginUser(x *xorm.Engine) error { +func UpdateMigrationServiceTypes(x *xorm.Engine) error { + type Repository struct { + ID int64 + OriginalServiceType int `xorm:"index default(0)"` + OriginalURL string `xorm:"VARCHAR(2048)"` + } + + if err := x.Sync2(new(Repository)); err != nil { + return err + } + + var last int + const batchSize = 50 + for { + results := make([]Repository, 0, batchSize) + err := x.Where("original_url <> '' AND original_url IS NOT NULL"). 
+ And("original_service_type = 0 OR original_service_type IS NULL"). + OrderBy("id"). + Limit(batchSize, last). + Find(&results) + if err != nil { + return err + } + if len(results) == 0 { + break + } + last += len(results) + + const PlainGitService = 1 // 1 plain git service + const GithubService = 2 // 2 github.com + + for _, res := range results { + u, err := url.Parse(res.OriginalURL) + if err != nil { + return err + } + serviceType := PlainGitService + if strings.EqualFold(u.Host, "github.com") { + serviceType = GithubService + } + _, err = x.Exec("UPDATE repository SET original_service_type = ? WHERE id = ?", serviceType, res.ID) + if err != nil { + return err + } + } + } + type ExternalLoginUser struct { - AccessToken string `xorm:"TEXT"` - AccessTokenSecret string `xorm:"TEXT"` - RefreshToken string `xorm:"TEXT"` + ExternalID string `xorm:"pk NOT NULL"` + UserID int64 `xorm:"INDEX NOT NULL"` + LoginSourceID int64 `xorm:"pk NOT NULL"` + RawData map[string]any `xorm:"TEXT JSON"` + Provider string `xorm:"index VARCHAR(25)"` + Email string + Name string + FirstName string + LastName string + NickName string + Description string + AvatarURL string + Location string + AccessToken string + AccessTokenSecret string + RefreshToken string + ExpiresAt time.Time } return x.Sync2(new(ExternalLoginUser)) diff --git a/models/migrations/v1_10/v102.go b/models/migrations/v1_10/v102.go new file mode 100644 index 0000000000000..79b419e9d97cc --- /dev/null +++ b/models/migrations/v1_10/v102.go @@ -0,0 +1,18 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_10 //nolint + +import ( + "xorm.io/xorm" +) + +func ChangeSomeColumnsLengthOfExternalLoginUser(x *xorm.Engine) error { + type ExternalLoginUser struct { + AccessToken string `xorm:"TEXT"` + AccessTokenSecret string `xorm:"TEXT"` + RefreshToken string `xorm:"TEXT"` + } + + return x.Sync2(new(ExternalLoginUser)) +} diff --git a/models/migrations/v1_10/v88.go b/models/migrations/v1_10/v88.go deleted file mode 100644 index e6376af62e49d..0000000000000 --- a/models/migrations/v1_10/v88.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. 
-// SPDX-License-Identifier: MIT - -package v1_10 //nolint - -import ( - "crypto/sha1" - "fmt" - - "xorm.io/xorm" -) - -func hashContext(context string) string { - return fmt.Sprintf("%x", sha1.Sum([]byte(context))) -} - -func AddCommitStatusContext(x *xorm.Engine) error { - type CommitStatus struct { - ID int64 `xorm:"pk autoincr"` - ContextHash string `xorm:"char(40) index"` - Context string `xorm:"TEXT"` - } - - if err := x.Sync2(new(CommitStatus)); err != nil { - return err - } - - sess := x.NewSession() - defer sess.Close() - - start := 0 - for { - statuses := make([]*CommitStatus, 0, 100) - err := sess.OrderBy("id").Limit(100, start).Find(&statuses) - if err != nil { - return err - } - if len(statuses) == 0 { - break - } - - if err = sess.Begin(); err != nil { - return err - } - - for _, status := range statuses { - status.ContextHash = hashContext(status.Context) - if _, err := sess.ID(status.ID).Cols("context_hash").Update(status); err != nil { - return err - } - } - - if err := sess.Commit(); err != nil { - return err - } - - if len(statuses) < 100 { - break - } - - start += len(statuses) - } - - return nil -} diff --git a/models/migrations/v1_10/v89.go b/models/migrations/v1_10/v89.go index 937068292c221..e6376af62e49d 100644 --- a/models/migrations/v1_10/v89.go +++ b/models/migrations/v1_10/v89.go @@ -3,33 +3,63 @@ package v1_10 //nolint -import "xorm.io/xorm" +import ( + "crypto/sha1" + "fmt" -func AddOriginalMigrationInfo(x *xorm.Engine) error { - // Issue see models/issue.go - type Issue struct { - OriginalAuthor string - OriginalAuthorID int64 - } + "xorm.io/xorm" +) - if err := x.Sync2(new(Issue)); err != nil { - return err - } +func hashContext(context string) string { + return fmt.Sprintf("%x", sha1.Sum([]byte(context))) +} - // Issue see models/issue_comment.go - type Comment struct { - OriginalAuthor string - OriginalAuthorID int64 +func AddCommitStatusContext(x *xorm.Engine) error { + type CommitStatus struct { + ID int64 `xorm:"pk autoincr"` + ContextHash string `xorm:"char(40) index"` + Context string `xorm:"TEXT"` } - if err := x.Sync2(new(Comment)); err != nil { + if err := x.Sync2(new(CommitStatus)); err != nil { return err } - // Issue see models/repo.go - type Repository struct { - OriginalURL string + sess := x.NewSession() + defer sess.Close() + + start := 0 + for { + statuses := make([]*CommitStatus, 0, 100) + err := sess.OrderBy("id").Limit(100, start).Find(&statuses) + if err != nil { + return err + } + if len(statuses) == 0 { + break + } + + if err = sess.Begin(); err != nil { + return err + } + + for _, status := range statuses { + status.ContextHash = hashContext(status.Context) + if _, err := sess.ID(status.ID).Cols("context_hash").Update(status); err != nil { + return err + } + } + + if err := sess.Commit(); err != nil { + return err + } + + if len(statuses) < 100 { + break + } + + start += len(statuses) } - return x.Sync2(new(Repository)) + return nil } diff --git a/models/migrations/v1_10/v90.go b/models/migrations/v1_10/v90.go index c9a69a6dfe30e..937068292c221 100644 --- a/models/migrations/v1_10/v90.go +++ b/models/migrations/v1_10/v90.go @@ -5,12 +5,30 @@ package v1_10 //nolint import "xorm.io/xorm" -func ChangeSomeColumnsLengthOfRepo(x *xorm.Engine) error { +func AddOriginalMigrationInfo(x *xorm.Engine) error { + // Issue see models/issue.go + type Issue struct { + OriginalAuthor string + OriginalAuthorID int64 + } + + if err := x.Sync2(new(Issue)); err != nil { + return err + } + + // Issue see models/issue_comment.go + type Comment struct { + 
OriginalAuthor string + OriginalAuthorID int64 + } + + if err := x.Sync2(new(Comment)); err != nil { + return err + } + + // Issue see models/repo.go type Repository struct { - ID int64 `xorm:"pk autoincr"` - Description string `xorm:"TEXT"` - Website string `xorm:"VARCHAR(2048)"` - OriginalURL string `xorm:"VARCHAR(2048)"` + OriginalURL string } return x.Sync2(new(Repository)) diff --git a/models/migrations/v1_10/v91.go b/models/migrations/v1_10/v91.go index 9b5fefb1d02a8..c9a69a6dfe30e 100644 --- a/models/migrations/v1_10/v91.go +++ b/models/migrations/v1_10/v91.go @@ -5,21 +5,13 @@ package v1_10 //nolint import "xorm.io/xorm" -func AddIndexOnRepositoryAndComment(x *xorm.Engine) error { +func ChangeSomeColumnsLengthOfRepo(x *xorm.Engine) error { type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"index"` + ID int64 `xorm:"pk autoincr"` + Description string `xorm:"TEXT"` + Website string `xorm:"VARCHAR(2048)"` + OriginalURL string `xorm:"VARCHAR(2048)"` } - if err := x.Sync2(new(Repository)); err != nil { - return err - } - - type Comment struct { - ID int64 `xorm:"pk autoincr"` - Type int `xorm:"index"` - ReviewID int64 `xorm:"index"` - } - - return x.Sync2(new(Comment)) + return x.Sync2(new(Repository)) } diff --git a/models/migrations/v1_10/v92.go b/models/migrations/v1_10/v92.go index 9080108594cd0..9b5fefb1d02a8 100644 --- a/models/migrations/v1_10/v92.go +++ b/models/migrations/v1_10/v92.go @@ -3,12 +3,23 @@ package v1_10 //nolint -import ( - "xorm.io/builder" - "xorm.io/xorm" -) - -func RemoveLingeringIndexStatus(x *xorm.Engine) error { - _, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_indexer_status`")) - return err +import "xorm.io/xorm" + +func AddIndexOnRepositoryAndComment(x *xorm.Engine) error { + type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"index"` + } + + if err := x.Sync2(new(Repository)); err != nil { + return err + } + + type Comment struct { + ID int64 `xorm:"pk autoincr"` + Type int `xorm:"index"` + ReviewID int64 `xorm:"index"` + } + + return x.Sync2(new(Comment)) } diff --git a/models/migrations/v1_10/v93.go b/models/migrations/v1_10/v93.go index 5b59065171e56..9080108594cd0 100644 --- a/models/migrations/v1_10/v93.go +++ b/models/migrations/v1_10/v93.go @@ -3,13 +3,12 @@ package v1_10 //nolint -import "xorm.io/xorm" - -func AddEmailNotificationEnabledToUser(x *xorm.Engine) error { - // User see models/user.go - type User struct { - EmailNotificationsPreference string `xorm:"VARCHAR(20) NOT NULL DEFAULT 'enabled'"` - } - - return x.Sync2(new(User)) +import ( + "xorm.io/builder" + "xorm.io/xorm" +) + +func RemoveLingeringIndexStatus(x *xorm.Engine) error { + _, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_indexer_status`")) + return err } diff --git a/models/migrations/v1_10/v94.go b/models/migrations/v1_10/v94.go index fe3804aee428f..5b59065171e56 100644 --- a/models/migrations/v1_10/v94.go +++ b/models/migrations/v1_10/v94.go @@ -5,19 +5,11 @@ package v1_10 //nolint import "xorm.io/xorm" -func AddStatusCheckColumnsForProtectedBranches(x *xorm.Engine) error { - type ProtectedBranch struct { - EnableStatusCheck bool `xorm:"NOT NULL DEFAULT false"` - StatusCheckContexts []string `xorm:"JSON TEXT"` +func AddEmailNotificationEnabledToUser(x *xorm.Engine) error { + // User see models/user.go + type User struct { + EmailNotificationsPreference string `xorm:"VARCHAR(20) NOT 
NULL DEFAULT 'enabled'"` } - if err := x.Sync2(new(ProtectedBranch)); err != nil { - return err - } - - _, err := x.Cols("enable_status_check", "status_check_contexts").Update(&ProtectedBranch{ - EnableStatusCheck: false, - StatusCheckContexts: []string{}, - }) - return err + return x.Sync2(new(User)) } diff --git a/models/migrations/v1_10/v95.go b/models/migrations/v1_10/v95.go index 1b60eefb4272e..fe3804aee428f 100644 --- a/models/migrations/v1_10/v95.go +++ b/models/migrations/v1_10/v95.go @@ -5,15 +5,19 @@ package v1_10 //nolint import "xorm.io/xorm" -func AddCrossReferenceColumns(x *xorm.Engine) error { - // Comment see models/comment.go - type Comment struct { - RefRepoID int64 `xorm:"index"` - RefIssueID int64 `xorm:"index"` - RefCommentID int64 `xorm:"index"` - RefAction int64 `xorm:"SMALLINT"` - RefIsPull bool +func AddStatusCheckColumnsForProtectedBranches(x *xorm.Engine) error { + type ProtectedBranch struct { + EnableStatusCheck bool `xorm:"NOT NULL DEFAULT false"` + StatusCheckContexts []string `xorm:"JSON TEXT"` } - return x.Sync2(new(Comment)) + if err := x.Sync2(new(ProtectedBranch)); err != nil { + return err + } + + _, err := x.Cols("enable_status_check", "status_check_contexts").Update(&ProtectedBranch{ + EnableStatusCheck: false, + StatusCheckContexts: []string{}, + }) + return err } diff --git a/models/migrations/v1_10/v96.go b/models/migrations/v1_10/v96.go index 34c8240031c20..1b60eefb4272e 100644 --- a/models/migrations/v1_10/v96.go +++ b/models/migrations/v1_10/v96.go @@ -3,62 +3,17 @@ package v1_10 //nolint -import ( - "path/filepath" - - "code.gitea.io/gitea/modules/setting" - "code.gitea.io/gitea/modules/util" - - "xorm.io/xorm" -) - -func DeleteOrphanedAttachments(x *xorm.Engine) error { - type Attachment struct { - ID int64 `xorm:"pk autoincr"` - UUID string `xorm:"uuid UNIQUE"` - IssueID int64 `xorm:"INDEX"` - ReleaseID int64 `xorm:"INDEX"` - CommentID int64 +import "xorm.io/xorm" + +func AddCrossReferenceColumns(x *xorm.Engine) error { + // Comment see models/comment.go + type Comment struct { + RefRepoID int64 `xorm:"index"` + RefIssueID int64 `xorm:"index"` + RefCommentID int64 `xorm:"index"` + RefAction int64 `xorm:"SMALLINT"` + RefIsPull bool } - sess := x.NewSession() - defer sess.Close() - - limit := setting.Database.IterateBufferSize - if limit <= 0 { - limit = 50 - } - - for { - attachments := make([]Attachment, 0, limit) - if err := sess.Where("`issue_id` = 0 and (`release_id` = 0 or `release_id` not in (select `id` from `release`))"). - Cols("id, uuid").Limit(limit). - Asc("id"). 
- Find(&attachments); err != nil { - return err - } - if len(attachments) == 0 { - return nil - } - - ids := make([]int64, 0, limit) - for _, attachment := range attachments { - ids = append(ids, attachment.ID) - } - if len(ids) > 0 { - if _, err := sess.In("id", ids).Delete(new(Attachment)); err != nil { - return err - } - } - - for _, attachment := range attachments { - uuid := attachment.UUID - if err := util.RemoveAll(filepath.Join(setting.Attachment.Storage.Path, uuid[0:1], uuid[1:2], uuid)); err != nil { - return err - } - } - if len(attachments) < limit { - return nil - } - } + return x.Sync2(new(Comment)) } diff --git a/models/migrations/v1_10/v97.go b/models/migrations/v1_10/v97.go index 8a1a4426ab2c1..34c8240031c20 100644 --- a/models/migrations/v1_10/v97.go +++ b/models/migrations/v1_10/v97.go @@ -3,12 +3,62 @@ package v1_10 //nolint -import "xorm.io/xorm" +import ( + "path/filepath" -func AddRepoAdminChangeTeamAccessColumnForUser(x *xorm.Engine) error { - type User struct { - RepoAdminChangeTeamAccess bool `xorm:"NOT NULL DEFAULT false"` + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/util" + + "xorm.io/xorm" +) + +func DeleteOrphanedAttachments(x *xorm.Engine) error { + type Attachment struct { + ID int64 `xorm:"pk autoincr"` + UUID string `xorm:"uuid UNIQUE"` + IssueID int64 `xorm:"INDEX"` + ReleaseID int64 `xorm:"INDEX"` + CommentID int64 + } + + sess := x.NewSession() + defer sess.Close() + + limit := setting.Database.IterateBufferSize + if limit <= 0 { + limit = 50 } - return x.Sync2(new(User)) + for { + attachments := make([]Attachment, 0, limit) + if err := sess.Where("`issue_id` = 0 and (`release_id` = 0 or `release_id` not in (select `id` from `release`))"). + Cols("id, uuid").Limit(limit). + Asc("id"). + Find(&attachments); err != nil { + return err + } + if len(attachments) == 0 { + return nil + } + + ids := make([]int64, 0, limit) + for _, attachment := range attachments { + ids = append(ids, attachment.ID) + } + if len(ids) > 0 { + if _, err := sess.In("id", ids).Delete(new(Attachment)); err != nil { + return err + } + } + + for _, attachment := range attachments { + uuid := attachment.UUID + if err := util.RemoveAll(filepath.Join(setting.Attachment.Storage.Path, uuid[0:1], uuid[1:2], uuid)); err != nil { + return err + } + } + if len(attachments) < limit { + return nil + } + } } diff --git a/models/migrations/v1_10/v98.go b/models/migrations/v1_10/v98.go index cab9a6306503d..8a1a4426ab2c1 100644 --- a/models/migrations/v1_10/v98.go +++ b/models/migrations/v1_10/v98.go @@ -5,12 +5,10 @@ package v1_10 //nolint import "xorm.io/xorm" -func AddOriginalAuthorOnMigratedReleases(x *xorm.Engine) error { - type Release struct { - ID int64 - OriginalAuthor string - OriginalAuthorID int64 `xorm:"index"` +func AddRepoAdminChangeTeamAccessColumnForUser(x *xorm.Engine) error { + type User struct { + RepoAdminChangeTeamAccess bool `xorm:"NOT NULL DEFAULT false"` } - return x.Sync2(new(Release)) + return x.Sync2(new(User)) } diff --git a/models/migrations/v1_10/v99.go b/models/migrations/v1_10/v99.go index 170a30631f888..cab9a6306503d 100644 --- a/models/migrations/v1_10/v99.go +++ b/models/migrations/v1_10/v99.go @@ -3,36 +3,14 @@ package v1_10 //nolint -import ( - "code.gitea.io/gitea/modules/timeutil" +import "xorm.io/xorm" - "xorm.io/xorm" -) - -func AddTaskTable(x *xorm.Engine) error { - // TaskType defines task type - type TaskType int - - // TaskStatus defines task status - type TaskStatus int - - type Task struct { - ID int64 - DoerID int64 
`xorm:"index"` // operator - OwnerID int64 `xorm:"index"` // repo owner id, when creating, the repoID maybe zero - RepoID int64 `xorm:"index"` - Type TaskType - Status TaskStatus `xorm:"index"` - StartTime timeutil.TimeStamp - EndTime timeutil.TimeStamp - PayloadContent string `xorm:"TEXT"` - Errors string `xorm:"TEXT"` // if task failed, saved the error reason - Created timeutil.TimeStamp `xorm:"created"` - } - - type Repository struct { - Status int `xorm:"NOT NULL DEFAULT 0"` +func AddOriginalAuthorOnMigratedReleases(x *xorm.Engine) error { + type Release struct { + ID int64 + OriginalAuthor string + OriginalAuthorID int64 `xorm:"index"` } - return x.Sync2(new(Task), new(Repository)) + return x.Sync2(new(Release)) } diff --git a/models/migrations/v1_11/v102.go b/models/migrations/v1_11/v102.go deleted file mode 100644 index 9358e4cef3344..0000000000000 --- a/models/migrations/v1_11/v102.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_11 //nolint - -import ( - "code.gitea.io/gitea/models/migrations/base" - - "xorm.io/xorm" -) - -func DropColumnHeadUserNameOnPullRequest(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - if err := base.DropTableColumns(sess, "pull_request", "head_user_name"); err != nil { - return err - } - return sess.Commit() -} diff --git a/models/migrations/v1_11/v103.go b/models/migrations/v1_11/v103.go index e4e16a054e827..9358e4cef3344 100644 --- a/models/migrations/v1_11/v103.go +++ b/models/migrations/v1_11/v103.go @@ -4,14 +4,19 @@ package v1_11 //nolint import ( + "code.gitea.io/gitea/models/migrations/base" + "xorm.io/xorm" ) -func AddWhitelistDeployKeysToBranches(x *xorm.Engine) error { - type ProtectedBranch struct { - ID int64 - WhitelistDeployKeys bool `xorm:"NOT NULL DEFAULT false"` +func DropColumnHeadUserNameOnPullRequest(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err } - - return x.Sync2(new(ProtectedBranch)) + if err := base.DropTableColumns(sess, "pull_request", "head_user_name"); err != nil { + return err + } + return sess.Commit() } diff --git a/models/migrations/v1_11/v104.go b/models/migrations/v1_11/v104.go index c76554cf59f7a..e4e16a054e827 100644 --- a/models/migrations/v1_11/v104.go +++ b/models/migrations/v1_11/v104.go @@ -4,31 +4,14 @@ package v1_11 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" - "xorm.io/xorm" ) -func RemoveLabelUneededCols(x *xorm.Engine) error { - // Make sure the columns exist before dropping them - type Label struct { - QueryString string - IsSelected bool - } - if err := x.Sync2(new(Label)); err != nil { - return err +func AddWhitelistDeployKeysToBranches(x *xorm.Engine) error { + type ProtectedBranch struct { + ID int64 + WhitelistDeployKeys bool `xorm:"NOT NULL DEFAULT false"` } - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - if err := base.DropTableColumns(sess, "label", "query_string"); err != nil { - return err - } - if err := base.DropTableColumns(sess, "label", "is_selected"); err != nil { - return err - } - return sess.Commit() + return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_11/v105.go b/models/migrations/v1_11/v105.go index df261c992c8e3..c76554cf59f7a 100644 --- a/models/migrations/v1_11/v105.go +++ b/models/migrations/v1_11/v105.go @@ -4,20 +4,31 @@ package v1_11 
//nolint import ( + "code.gitea.io/gitea/models/migrations/base" + "xorm.io/xorm" ) -func AddTeamIncludesAllRepositories(x *xorm.Engine) error { - type Team struct { - ID int64 `xorm:"pk autoincr"` - IncludesAllRepositories bool `xorm:"NOT NULL DEFAULT false"` +func RemoveLabelUneededCols(x *xorm.Engine) error { + // Make sure the columns exist before dropping them + type Label struct { + QueryString string + IsSelected bool } - - if err := x.Sync2(new(Team)); err != nil { + if err := x.Sync2(new(Label)); err != nil { return err } - _, err := x.Exec("UPDATE `team` SET `includes_all_repositories` = ? WHERE `name`=?", - true, "Owners") - return err + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + if err := base.DropTableColumns(sess, "label", "query_string"); err != nil { + return err + } + if err := base.DropTableColumns(sess, "label", "is_selected"); err != nil { + return err + } + return sess.Commit() } diff --git a/models/migrations/v1_11/v106.go b/models/migrations/v1_11/v106.go index 3e06309a8d8ec..df261c992c8e3 100644 --- a/models/migrations/v1_11/v106.go +++ b/models/migrations/v1_11/v106.go @@ -7,19 +7,17 @@ import ( "xorm.io/xorm" ) -// RepoWatchMode specifies what kind of watch the user has on a repository -type RepoWatchMode int8 - -// Watch is connection request for receiving repository notification. -type Watch struct { - ID int64 `xorm:"pk autoincr"` - Mode RepoWatchMode `xorm:"SMALLINT NOT NULL DEFAULT 1"` -} +func AddTeamIncludesAllRepositories(x *xorm.Engine) error { + type Team struct { + ID int64 `xorm:"pk autoincr"` + IncludesAllRepositories bool `xorm:"NOT NULL DEFAULT false"` + } -func AddModeColumnToWatch(x *xorm.Engine) (err error) { - if err = x.Sync2(new(Watch)); err != nil { - return + if err := x.Sync2(new(Team)); err != nil { + return err } - _, err = x.Exec("UPDATE `watch` SET `mode` = 1") + + _, err := x.Exec("UPDATE `team` SET `includes_all_repositories` = ? WHERE `name`=?", + true, "Owners") return err } diff --git a/models/migrations/v1_11/v107.go b/models/migrations/v1_11/v107.go index 1ffbd35dd714e..3e06309a8d8ec 100644 --- a/models/migrations/v1_11/v107.go +++ b/models/migrations/v1_11/v107.go @@ -7,11 +7,19 @@ import ( "xorm.io/xorm" ) -func AddTemplateToRepo(x *xorm.Engine) error { - type Repository struct { - IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"` - TemplateID int64 `xorm:"INDEX"` - } +// RepoWatchMode specifies what kind of watch the user has on a repository +type RepoWatchMode int8 + +// Watch is connection request for receiving repository notification. 
+type Watch struct { + ID int64 `xorm:"pk autoincr"` + Mode RepoWatchMode `xorm:"SMALLINT NOT NULL DEFAULT 1"` +} - return x.Sync2(new(Repository)) +func AddModeColumnToWatch(x *xorm.Engine) (err error) { + if err = x.Sync2(new(Watch)); err != nil { + return + } + _, err = x.Exec("UPDATE `watch` SET `mode` = 1") + return err } diff --git a/models/migrations/v1_11/v108.go b/models/migrations/v1_11/v108.go index 28132c377d9fe..1ffbd35dd714e 100644 --- a/models/migrations/v1_11/v108.go +++ b/models/migrations/v1_11/v108.go @@ -7,11 +7,11 @@ import ( "xorm.io/xorm" ) -func AddCommentIDOnNotification(x *xorm.Engine) error { - type Notification struct { - ID int64 `xorm:"pk autoincr"` - CommentID int64 +func AddTemplateToRepo(x *xorm.Engine) error { + type Repository struct { + IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"` + TemplateID int64 `xorm:"INDEX"` } - return x.Sync2(new(Notification)) + return x.Sync2(new(Repository)) } diff --git a/models/migrations/v1_11/v109.go b/models/migrations/v1_11/v109.go index d2ee9a6aaa21d..28132c377d9fe 100644 --- a/models/migrations/v1_11/v109.go +++ b/models/migrations/v1_11/v109.go @@ -7,10 +7,11 @@ import ( "xorm.io/xorm" ) -func AddCanCreateOrgRepoColumnForTeam(x *xorm.Engine) error { - type Team struct { - CanCreateOrgRepo bool `xorm:"NOT NULL DEFAULT false"` +func AddCommentIDOnNotification(x *xorm.Engine) error { + type Notification struct { + ID int64 `xorm:"pk autoincr"` + CommentID int64 } - return x.Sync2(new(Team)) + return x.Sync2(new(Notification)) } diff --git a/models/migrations/v1_11/v110.go b/models/migrations/v1_11/v110.go index 81afa1331d4ff..d2ee9a6aaa21d 100644 --- a/models/migrations/v1_11/v110.go +++ b/models/migrations/v1_11/v110.go @@ -5,25 +5,12 @@ package v1_11 //nolint import ( "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -func ChangeReviewContentToText(x *xorm.Engine) error { - switch x.Dialect().URI().DBType { - case schemas.MYSQL: - _, err := x.Exec("ALTER TABLE review MODIFY COLUMN content TEXT") - return err - case schemas.ORACLE: - _, err := x.Exec("ALTER TABLE review MODIFY content TEXT") - return err - case schemas.MSSQL: - _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TEXT") - return err - case schemas.POSTGRES: - _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TYPE TEXT") - return err - default: - // SQLite doesn't support ALTER COLUMN, and it seem to already make String to _TEXT_ default so no migration needed - return nil +func AddCanCreateOrgRepoColumnForTeam(x *xorm.Engine) error { + type Team struct { + CanCreateOrgRepo bool `xorm:"NOT NULL DEFAULT false"` } + + return x.Sync2(new(Team)) } diff --git a/models/migrations/v1_11/v111.go b/models/migrations/v1_11/v111.go index 5b15c32163680..81afa1331d4ff 100644 --- a/models/migrations/v1_11/v111.go +++ b/models/migrations/v1_11/v111.go @@ -4,435 +4,26 @@ package v1_11 //nolint import ( - "fmt" - "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error { - type ProtectedBranch struct { - CanPush bool `xorm:"NOT NULL DEFAULT false"` - EnableApprovalsWhitelist bool `xorm:"NOT NULL DEFAULT false"` - ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"` - ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"` - RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"` - } - - type User struct { - ID int64 `xorm:"pk autoincr"` - Type int - - // Permissions - IsAdmin bool - IsRestricted bool `xorm:"NOT NULL DEFAULT false"` - Visibility int `xorm:"NOT NULL DEFAULT 0"` - } - - type Review struct { - 
ID int64 `xorm:"pk autoincr"` - Official bool `xorm:"NOT NULL DEFAULT false"` - - ReviewerID int64 `xorm:"index"` - IssueID int64 `xorm:"index"` - } - - if err := x.Sync2(new(ProtectedBranch)); err != nil { +func ChangeReviewContentToText(x *xorm.Engine) error { + switch x.Dialect().URI().DBType { + case schemas.MYSQL: + _, err := x.Exec("ALTER TABLE review MODIFY COLUMN content TEXT") return err - } - - if err := x.Sync2(new(Review)); err != nil { + case schemas.ORACLE: + _, err := x.Exec("ALTER TABLE review MODIFY content TEXT") return err - } - - const ( - // ReviewTypeApprove approves changes - ReviewTypeApprove int = 1 - // ReviewTypeReject gives feedback blocking merge - ReviewTypeReject int = 3 - - // VisibleTypePublic Visible for everyone - VisibleTypePublic int = 0 - // VisibleTypePrivate Visible only for organization's members - VisibleTypePrivate int = 2 - - // unit.UnitTypeCode is unit type code - UnitTypeCode int = 1 - - // AccessModeNone no access - AccessModeNone int = 0 - // AccessModeRead read access - AccessModeRead int = 1 - // AccessModeWrite write access - AccessModeWrite int = 2 - // AccessModeOwner owner access - AccessModeOwner int = 4 - ) - - // Repository represents a git repository. - type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) index"` - - IsPrivate bool `xorm:"INDEX"` - } - - type PullRequest struct { - ID int64 `xorm:"pk autoincr"` - - BaseRepoID int64 `xorm:"INDEX"` - BaseBranch string - } - - // RepoUnit describes all units of a repository - type RepoUnit struct { - ID int64 - RepoID int64 `xorm:"INDEX(s)"` - Type int `xorm:"INDEX(s)"` - } - - type Permission struct { - AccessMode int - Units []*RepoUnit - UnitsMode map[int]int - } - - type TeamUser struct { - ID int64 `xorm:"pk autoincr"` - TeamID int64 `xorm:"UNIQUE(s)"` - UID int64 `xorm:"UNIQUE(s)"` - } - - type Collaboration struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - Mode int `xorm:"DEFAULT 2 NOT NULL"` - } - - type Access struct { - ID int64 `xorm:"pk autoincr"` - UserID int64 `xorm:"UNIQUE(s)"` - RepoID int64 `xorm:"UNIQUE(s)"` - Mode int - } - - type TeamUnit struct { - ID int64 `xorm:"pk autoincr"` - OrgID int64 `xorm:"INDEX"` - TeamID int64 `xorm:"UNIQUE(s)"` - Type int `xorm:"UNIQUE(s)"` - } - - // Team represents a organization team. - type Team struct { - ID int64 `xorm:"pk autoincr"` - OrgID int64 `xorm:"INDEX"` - Authorize int - } - - // getUserRepoPermission static function based on issues_model.IsOfficialReviewer at 5d78792385 - getUserRepoPermission := func(sess *xorm.Session, repo *Repository, user *User) (Permission, error) { - var perm Permission - - repoOwner := new(User) - has, err := sess.ID(repo.OwnerID).Get(repoOwner) - if err != nil || !has { - return perm, err - } - - // Prevent strangers from checking out public repo of private organization - // Allow user if they are collaborator of a repo within a private organization but not a member of the organization itself - hasOrgVisible := true - // Not SignedUser - if user == nil { - hasOrgVisible = repoOwner.Visibility == VisibleTypePublic - } else if !user.IsAdmin { - hasMemberWithUserID, err := sess. - Where("uid=?", user.ID). - And("org_id=?", repoOwner.ID). - Table("org_user"). 
- Exist() - if err != nil { - hasOrgVisible = false - } - if (repoOwner.Visibility == VisibleTypePrivate || user.IsRestricted) && !hasMemberWithUserID { - hasOrgVisible = false - } - } - - isCollaborator, err := sess.Get(&Collaboration{RepoID: repo.ID, UserID: user.ID}) - if err != nil { - return perm, err - } - - if repoOwner.Type == 1 && !hasOrgVisible && !isCollaborator { - perm.AccessMode = AccessModeNone - return perm, err - } - - var units []*RepoUnit - if err := sess.Where("repo_id = ?", repo.ID).Find(&units); err != nil { - return perm, err - } - perm.Units = units - - // anonymous visit public repo - if user == nil { - perm.AccessMode = AccessModeRead - return perm, err - } - - // Admin or the owner has super access to the repository - if user.IsAdmin || user.ID == repo.OwnerID { - perm.AccessMode = AccessModeOwner - return perm, err - } - - accessLevel := func(user *User, repo *Repository) (int, error) { - mode := AccessModeNone - var userID int64 - restricted := false - - if user != nil { - userID = user.ID - restricted = user.IsRestricted - } - - if !restricted && !repo.IsPrivate { - mode = AccessModeRead - } - - if userID == 0 { - return mode, nil - } - - if userID == repo.OwnerID { - return AccessModeOwner, nil - } - - a := &Access{UserID: userID, RepoID: repo.ID} - if has, err := sess.Get(a); !has || err != nil { - return mode, err - } - return a.Mode, nil - } - - // plain user - perm.AccessMode, err = accessLevel(user, repo) - if err != nil { - return perm, err - } - - // If Owner is no Org - if repoOwner.Type != 1 { - return perm, err - } - - perm.UnitsMode = make(map[int]int) - - // Collaborators on organization - if isCollaborator { - for _, u := range units { - perm.UnitsMode[u.Type] = perm.AccessMode - } - } - - // get units mode from teams - var teams []*Team - err = sess. - Join("INNER", "team_user", "team_user.team_id = team.id"). - Join("INNER", "team_repo", "team_repo.team_id = team.id"). - Where("team.org_id = ?", repo.OwnerID). - And("team_user.uid=?", user.ID). - And("team_repo.repo_id=?", repo.ID). - Find(&teams) - if err != nil { - return perm, err - } - - // if user in an owner team - for _, team := range teams { - if team.Authorize >= AccessModeOwner { - perm.AccessMode = AccessModeOwner - perm.UnitsMode = nil - return perm, err - } - } - - for _, u := range units { - var found bool - for _, team := range teams { - - var teamU []*TeamUnit - var unitEnabled bool - err = sess.Where("team_id = ?", team.ID).Find(&teamU) - - for _, tu := range teamU { - if tu.Type == u.Type { - unitEnabled = true - break - } - } - - if unitEnabled { - m := perm.UnitsMode[u.Type] - if m < team.Authorize { - perm.UnitsMode[u.Type] = team.Authorize - } - found = true - } - } - - // for a public repo on an organization, a non-restricted user has read permission on non-team defined units. 
- if !found && !repo.IsPrivate && !user.IsRestricted { - if _, ok := perm.UnitsMode[u.Type]; !ok { - perm.UnitsMode[u.Type] = AccessModeRead - } - } - } - - // remove no permission units - perm.Units = make([]*RepoUnit, 0, len(units)) - for t := range perm.UnitsMode { - for _, u := range units { - if u.Type == t { - perm.Units = append(perm.Units, u) - } - } - } - - return perm, err - } - - // isOfficialReviewer static function based on 5d78792385 - isOfficialReviewer := func(sess *xorm.Session, issueID int64, reviewer *User) (bool, error) { - pr := new(PullRequest) - has, err := sess.ID(issueID).Get(pr) - if err != nil { - return false, err - } else if !has { - return false, fmt.Errorf("PullRequest for issueID %d not exist", issueID) - } - - baseRepo := new(Repository) - has, err = sess.ID(pr.BaseRepoID).Get(baseRepo) - if err != nil { - return false, err - } else if !has { - return false, fmt.Errorf("baseRepo with id %d not exist", pr.BaseRepoID) - } - protectedBranch := new(ProtectedBranch) - has, err = sess.Where("repo_id=? AND branch_name=?", baseRepo.ID, pr.BaseBranch).Get(protectedBranch) - if err != nil { - return false, err - } - if !has { - return false, nil - } - - if !protectedBranch.EnableApprovalsWhitelist { - - perm, err := getUserRepoPermission(sess, baseRepo, reviewer) - if err != nil { - return false, err - } - if perm.UnitsMode == nil { - for _, u := range perm.Units { - if u.Type == UnitTypeCode { - return AccessModeWrite <= perm.AccessMode, nil - } - } - return false, nil - } - return AccessModeWrite <= perm.UnitsMode[UnitTypeCode], nil - } - for _, id := range protectedBranch.ApprovalsWhitelistUserIDs { - if id == reviewer.ID { - return true, nil - } - } - - // isUserInTeams - return sess.Where("uid=?", reviewer.ID).In("team_id", protectedBranch.ApprovalsWhitelistTeamIDs).Exist(new(TeamUser)) - } - - if _, err := x.Exec("UPDATE `protected_branch` SET `enable_whitelist` = ? WHERE enable_whitelist IS NULL", false); err != nil { - return err - } - if _, err := x.Exec("UPDATE `protected_branch` SET `can_push` = `enable_whitelist`"); err != nil { + case schemas.MSSQL: + _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TEXT") return err - } - if _, err := x.Exec("UPDATE `protected_branch` SET `enable_approvals_whitelist` = ? WHERE `required_approvals` > ?", true, 0); err != nil { + case schemas.POSTGRES: + _, err := x.Exec("ALTER TABLE review ALTER COLUMN content TYPE TEXT") return err - } - - var pageSize int64 = 20 - qresult, err := x.QueryInterface("SELECT max(id) as max_id FROM issue") - if err != nil { - return err - } - var totalIssues int64 - totalIssues, ok := qresult[0]["max_id"].(int64) - if !ok { - // If there are no issues at all we ignore it - return nil - } - totalPages := totalIssues / pageSize - - executeBody := func(page, pageSize int64) error { - // Find latest review of each user in each pull request, and set official field if appropriate - reviews := []*Review{} - - if err := x.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id > ? AND issue_id <= ? AND type in (?, ?) GROUP BY issue_id, reviewer_id)", - page*pageSize, (page+1)*pageSize, ReviewTypeApprove, ReviewTypeReject). 
- Find(&reviews); err != nil { - return err - } - - if len(reviews) == 0 { - return nil - } - - sess := x.NewSession() - defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - var updated int - for _, review := range reviews { - reviewer := new(User) - has, err := sess.ID(review.ReviewerID).Get(reviewer) - if err != nil || !has { - // Error might occur if user doesn't exist, ignore it. - continue - } - - official, err := isOfficialReviewer(sess, review.IssueID, reviewer) - if err != nil { - // Branch might not be proteced or other error, ignore it. - continue - } - review.Official = official - updated++ - if _, err := sess.ID(review.ID).Cols("official").Update(review); err != nil { - return err - } - } - - if updated > 0 { - return sess.Commit() - } + default: + // SQLite doesn't support ALTER COLUMN, and it seem to already make String to _TEXT_ default so no migration needed return nil } - - var page int64 - for page = 0; page <= totalPages; page++ { - if err := executeBody(page, pageSize); err != nil { - return err - } - } - - return nil } diff --git a/models/migrations/v1_11/v112.go b/models/migrations/v1_11/v112.go index 0857663119535..5b15c32163680 100644 --- a/models/migrations/v1_11/v112.go +++ b/models/migrations/v1_11/v112.go @@ -5,43 +5,434 @@ package v1_11 //nolint import ( "fmt" - "path/filepath" - "code.gitea.io/gitea/modules/setting" - "code.gitea.io/gitea/modules/util" - - "xorm.io/builder" "xorm.io/xorm" ) -func RemoveAttachmentMissedRepo(x *xorm.Engine) error { - type Attachment struct { - UUID string `xorm:"uuid"` +func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error { + type ProtectedBranch struct { + CanPush bool `xorm:"NOT NULL DEFAULT false"` + EnableApprovalsWhitelist bool `xorm:"NOT NULL DEFAULT false"` + ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"` + ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"` + RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"` + } + + type User struct { + ID int64 `xorm:"pk autoincr"` + Type int + + // Permissions + IsAdmin bool + IsRestricted bool `xorm:"NOT NULL DEFAULT false"` + Visibility int `xorm:"NOT NULL DEFAULT 0"` + } + + type Review struct { + ID int64 `xorm:"pk autoincr"` + Official bool `xorm:"NOT NULL DEFAULT false"` + + ReviewerID int64 `xorm:"index"` + IssueID int64 `xorm:"index"` + } + + if err := x.Sync2(new(ProtectedBranch)); err != nil { + return err + } + + if err := x.Sync2(new(Review)); err != nil { + return err + } + + const ( + // ReviewTypeApprove approves changes + ReviewTypeApprove int = 1 + // ReviewTypeReject gives feedback blocking merge + ReviewTypeReject int = 3 + + // VisibleTypePublic Visible for everyone + VisibleTypePublic int = 0 + // VisibleTypePrivate Visible only for organization's members + VisibleTypePrivate int = 2 + + // unit.UnitTypeCode is unit type code + UnitTypeCode int = 1 + + // AccessModeNone no access + AccessModeNone int = 0 + // AccessModeRead read access + AccessModeRead int = 1 + // AccessModeWrite write access + AccessModeWrite int = 2 + // AccessModeOwner owner access + AccessModeOwner int = 4 + ) + + // Repository represents a git repository. 
+ type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) index"` + + IsPrivate bool `xorm:"INDEX"` + } + + type PullRequest struct { + ID int64 `xorm:"pk autoincr"` + + BaseRepoID int64 `xorm:"INDEX"` + BaseBranch string + } + + // RepoUnit describes all units of a repository + type RepoUnit struct { + ID int64 + RepoID int64 `xorm:"INDEX(s)"` + Type int `xorm:"INDEX(s)"` + } + + type Permission struct { + AccessMode int + Units []*RepoUnit + UnitsMode map[int]int + } + + type TeamUser struct { + ID int64 `xorm:"pk autoincr"` + TeamID int64 `xorm:"UNIQUE(s)"` + UID int64 `xorm:"UNIQUE(s)"` + } + + type Collaboration struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + Mode int `xorm:"DEFAULT 2 NOT NULL"` + } + + type Access struct { + ID int64 `xorm:"pk autoincr"` + UserID int64 `xorm:"UNIQUE(s)"` + RepoID int64 `xorm:"UNIQUE(s)"` + Mode int + } + + type TeamUnit struct { + ID int64 `xorm:"pk autoincr"` + OrgID int64 `xorm:"INDEX"` + TeamID int64 `xorm:"UNIQUE(s)"` + Type int `xorm:"UNIQUE(s)"` + } + + // Team represents a organization team. + type Team struct { + ID int64 `xorm:"pk autoincr"` + OrgID int64 `xorm:"INDEX"` + Authorize int + } + + // getUserRepoPermission static function based on issues_model.IsOfficialReviewer at 5d78792385 + getUserRepoPermission := func(sess *xorm.Session, repo *Repository, user *User) (Permission, error) { + var perm Permission + + repoOwner := new(User) + has, err := sess.ID(repo.OwnerID).Get(repoOwner) + if err != nil || !has { + return perm, err + } + + // Prevent strangers from checking out public repo of private organization + // Allow user if they are collaborator of a repo within a private organization but not a member of the organization itself + hasOrgVisible := true + // Not SignedUser + if user == nil { + hasOrgVisible = repoOwner.Visibility == VisibleTypePublic + } else if !user.IsAdmin { + hasMemberWithUserID, err := sess. + Where("uid=?", user.ID). + And("org_id=?", repoOwner.ID). + Table("org_user"). 
+ Exist() + if err != nil { + hasOrgVisible = false + } + if (repoOwner.Visibility == VisibleTypePrivate || user.IsRestricted) && !hasMemberWithUserID { + hasOrgVisible = false + } + } + + isCollaborator, err := sess.Get(&Collaboration{RepoID: repo.ID, UserID: user.ID}) + if err != nil { + return perm, err + } + + if repoOwner.Type == 1 && !hasOrgVisible && !isCollaborator { + perm.AccessMode = AccessModeNone + return perm, err + } + + var units []*RepoUnit + if err := sess.Where("repo_id = ?", repo.ID).Find(&units); err != nil { + return perm, err + } + perm.Units = units + + // anonymous visit public repo + if user == nil { + perm.AccessMode = AccessModeRead + return perm, err + } + + // Admin or the owner has super access to the repository + if user.IsAdmin || user.ID == repo.OwnerID { + perm.AccessMode = AccessModeOwner + return perm, err + } + + accessLevel := func(user *User, repo *Repository) (int, error) { + mode := AccessModeNone + var userID int64 + restricted := false + + if user != nil { + userID = user.ID + restricted = user.IsRestricted + } + + if !restricted && !repo.IsPrivate { + mode = AccessModeRead + } + + if userID == 0 { + return mode, nil + } + + if userID == repo.OwnerID { + return AccessModeOwner, nil + } + + a := &Access{UserID: userID, RepoID: repo.ID} + if has, err := sess.Get(a); !has || err != nil { + return mode, err + } + return a.Mode, nil + } + + // plain user + perm.AccessMode, err = accessLevel(user, repo) + if err != nil { + return perm, err + } + + // If Owner is no Org + if repoOwner.Type != 1 { + return perm, err + } + + perm.UnitsMode = make(map[int]int) + + // Collaborators on organization + if isCollaborator { + for _, u := range units { + perm.UnitsMode[u.Type] = perm.AccessMode + } + } + + // get units mode from teams + var teams []*Team + err = sess. + Join("INNER", "team_user", "team_user.team_id = team.id"). + Join("INNER", "team_repo", "team_repo.team_id = team.id"). + Where("team.org_id = ?", repo.OwnerID). + And("team_user.uid=?", user.ID). + And("team_repo.repo_id=?", repo.ID). + Find(&teams) + if err != nil { + return perm, err + } + + // if user in an owner team + for _, team := range teams { + if team.Authorize >= AccessModeOwner { + perm.AccessMode = AccessModeOwner + perm.UnitsMode = nil + return perm, err + } + } + + for _, u := range units { + var found bool + for _, team := range teams { + + var teamU []*TeamUnit + var unitEnabled bool + err = sess.Where("team_id = ?", team.ID).Find(&teamU) + + for _, tu := range teamU { + if tu.Type == u.Type { + unitEnabled = true + break + } + } + + if unitEnabled { + m := perm.UnitsMode[u.Type] + if m < team.Authorize { + perm.UnitsMode[u.Type] = team.Authorize + } + found = true + } + } + + // for a public repo on an organization, a non-restricted user has read permission on non-team defined units. + if !found && !repo.IsPrivate && !user.IsRestricted { + if _, ok := perm.UnitsMode[u.Type]; !ok { + perm.UnitsMode[u.Type] = AccessModeRead + } + } + } + + // remove no permission units + perm.Units = make([]*RepoUnit, 0, len(units)) + for t := range perm.UnitsMode { + for _, u := range units { + if u.Type == t { + perm.Units = append(perm.Units, u) + } + } + } + + return perm, err } - var start int - attachments := make([]*Attachment, 0, 50) - for { - err := x.Select("uuid").Where(builder.NotIn("release_id", builder.Select("id").From("`release`"))). - And("release_id > 0"). 
- OrderBy("id").Limit(50, start).Find(&attachments) + + // isOfficialReviewer static function based on 5d78792385 + isOfficialReviewer := func(sess *xorm.Session, issueID int64, reviewer *User) (bool, error) { + pr := new(PullRequest) + has, err := sess.ID(issueID).Get(pr) + if err != nil { + return false, err + } else if !has { + return false, fmt.Errorf("PullRequest for issueID %d not exist", issueID) + } + + baseRepo := new(Repository) + has, err = sess.ID(pr.BaseRepoID).Get(baseRepo) + if err != nil { + return false, err + } else if !has { + return false, fmt.Errorf("baseRepo with id %d not exist", pr.BaseRepoID) + } + protectedBranch := new(ProtectedBranch) + has, err = sess.Where("repo_id=? AND branch_name=?", baseRepo.ID, pr.BaseBranch).Get(protectedBranch) if err != nil { + return false, err + } + if !has { + return false, nil + } + + if !protectedBranch.EnableApprovalsWhitelist { + + perm, err := getUserRepoPermission(sess, baseRepo, reviewer) + if err != nil { + return false, err + } + if perm.UnitsMode == nil { + for _, u := range perm.Units { + if u.Type == UnitTypeCode { + return AccessModeWrite <= perm.AccessMode, nil + } + } + return false, nil + } + return AccessModeWrite <= perm.UnitsMode[UnitTypeCode], nil + } + for _, id := range protectedBranch.ApprovalsWhitelistUserIDs { + if id == reviewer.ID { + return true, nil + } + } + + // isUserInTeams + return sess.Where("uid=?", reviewer.ID).In("team_id", protectedBranch.ApprovalsWhitelistTeamIDs).Exist(new(TeamUser)) + } + + if _, err := x.Exec("UPDATE `protected_branch` SET `enable_whitelist` = ? WHERE enable_whitelist IS NULL", false); err != nil { + return err + } + if _, err := x.Exec("UPDATE `protected_branch` SET `can_push` = `enable_whitelist`"); err != nil { + return err + } + if _, err := x.Exec("UPDATE `protected_branch` SET `enable_approvals_whitelist` = ? WHERE `required_approvals` > ?", true, 0); err != nil { + return err + } + + var pageSize int64 = 20 + qresult, err := x.QueryInterface("SELECT max(id) as max_id FROM issue") + if err != nil { + return err + } + var totalIssues int64 + totalIssues, ok := qresult[0]["max_id"].(int64) + if !ok { + // If there are no issues at all we ignore it + return nil + } + totalPages := totalIssues / pageSize + + executeBody := func(page, pageSize int64) error { + // Find latest review of each user in each pull request, and set official field if appropriate + reviews := []*Review{} + + if err := x.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id > ? AND issue_id <= ? AND type in (?, ?) GROUP BY issue_id, reviewer_id)", + page*pageSize, (page+1)*pageSize, ReviewTypeApprove, ReviewTypeReject). + Find(&reviews); err != nil { + return err + } + + if len(reviews) == 0 { + return nil + } + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { return err } - for i := 0; i < len(attachments); i++ { - uuid := attachments[i].UUID - if err = util.RemoveAll(filepath.Join(setting.Attachment.Storage.Path, uuid[0:1], uuid[1:2], uuid)); err != nil { - fmt.Printf("Error: %v", err) //nolint:forbidigo + var updated int + for _, review := range reviews { + reviewer := new(User) + has, err := sess.ID(review.ReviewerID).Get(reviewer) + if err != nil || !has { + // Error might occur if user doesn't exist, ignore it. + continue + } + + official, err := isOfficialReviewer(sess, review.IssueID, reviewer) + if err != nil { + // Branch might not be proteced or other error, ignore it. 
+ continue + } + review.Official = official + updated++ + if _, err := sess.ID(review.ID).Cols("official").Update(review); err != nil { + return err } } - if len(attachments) < 50 { - break + if updated > 0 { + return sess.Commit() + } + return nil + } + + var page int64 + for page = 0; page <= totalPages; page++ { + if err := executeBody(page, pageSize); err != nil { + return err } - start += 50 - attachments = attachments[:0] } - _, err := x.Exec("DELETE FROM attachment WHERE release_id > 0 AND release_id NOT IN (SELECT id FROM `release`)") - return err + return nil } diff --git a/models/migrations/v1_11/v113.go b/models/migrations/v1_11/v113.go index dc9adb60b05d6..0857663119535 100644 --- a/models/migrations/v1_11/v113.go +++ b/models/migrations/v1_11/v113.go @@ -5,18 +5,43 @@ package v1_11 //nolint import ( "fmt" + "path/filepath" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/util" + + "xorm.io/builder" "xorm.io/xorm" ) -func FeatureChangeTargetBranch(x *xorm.Engine) error { - type Comment struct { - OldRef string - NewRef string +func RemoveAttachmentMissedRepo(x *xorm.Engine) error { + type Attachment struct { + UUID string `xorm:"uuid"` } + var start int + attachments := make([]*Attachment, 0, 50) + for { + err := x.Select("uuid").Where(builder.NotIn("release_id", builder.Select("id").From("`release`"))). + And("release_id > 0"). + OrderBy("id").Limit(50, start).Find(&attachments) + if err != nil { + return err + } + + for i := 0; i < len(attachments); i++ { + uuid := attachments[i].UUID + if err = util.RemoveAll(filepath.Join(setting.Attachment.Storage.Path, uuid[0:1], uuid[1:2], uuid)); err != nil { + fmt.Printf("Error: %v", err) //nolint:forbidigo + } + } - if err := x.Sync2(new(Comment)); err != nil { - return fmt.Errorf("Sync2: %w", err) + if len(attachments) < 50 { + break + } + start += 50 + attachments = attachments[:0] } - return nil + + _, err := x.Exec("DELETE FROM attachment WHERE release_id > 0 AND release_id NOT IN (SELECT id FROM `release`)") + return err } diff --git a/models/migrations/v1_11/v114.go b/models/migrations/v1_11/v114.go index 95adcee989c1f..dc9adb60b05d6 100644 --- a/models/migrations/v1_11/v114.go +++ b/models/migrations/v1_11/v114.go @@ -4,47 +4,19 @@ package v1_11 //nolint import ( - "net/url" + "fmt" "xorm.io/xorm" ) -func SanitizeOriginalURL(x *xorm.Engine) error { - type Repository struct { - ID int64 - OriginalURL string `xorm:"VARCHAR(2048)"` +func FeatureChangeTargetBranch(x *xorm.Engine) error { + type Comment struct { + OldRef string + NewRef string } - var last int - const batchSize = 50 - for { - results := make([]Repository, 0, batchSize) - err := x.Where("original_url <> '' AND original_url IS NOT NULL"). - And("original_service_type = 0 OR original_service_type IS NULL"). - OrderBy("id"). - Limit(batchSize, last). - Find(&results) - if err != nil { - return err - } - if len(results) == 0 { - break - } - last += len(results) - - for _, res := range results { - u, err := url.Parse(res.OriginalURL) - if err != nil { - // it is ok to continue here, we only care about fixing URLs that we can read - continue - } - u.User = nil - originalURL := u.String() - _, err = x.Exec("UPDATE repository SET original_url = ? 
WHERE id = ?", originalURL, res.ID) - if err != nil { - return err - } - } + if err := x.Sync2(new(Comment)); err != nil { + return fmt.Errorf("Sync2: %w", err) } return nil } diff --git a/models/migrations/v1_11/v115.go b/models/migrations/v1_11/v115.go index 8c631cfd0bbf3..95adcee989c1f 100644 --- a/models/migrations/v1_11/v115.go +++ b/models/migrations/v1_11/v115.go @@ -4,156 +4,47 @@ package v1_11 //nolint import ( - "crypto/md5" - "fmt" - "io" - "math" - "os" - "path/filepath" - "time" - - "code.gitea.io/gitea/modules/container" - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" - "code.gitea.io/gitea/modules/util" + "net/url" "xorm.io/xorm" ) -func RenameExistingUserAvatarName(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - - type User struct { - ID int64 `xorm:"pk autoincr"` - LowerName string `xorm:"UNIQUE NOT NULL"` - Avatar string - } - - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - - count, err := x.Count(new(User)) - if err != nil { - return err +func SanitizeOriginalURL(x *xorm.Engine) error { + type Repository struct { + ID int64 + OriginalURL string `xorm:"VARCHAR(2048)"` } - log.Info("%d User Avatar(s) to migrate ...", count) - deleteList := make(container.Set[string]) - start := 0 - migrated := 0 + var last int + const batchSize = 50 for { - if err := sess.Begin(); err != nil { - return fmt.Errorf("session.Begin: %w", err) - } - users := make([]*User, 0, 50) - if err := sess.Table("user").Asc("id").Limit(50, start).Find(&users); err != nil { - return fmt.Errorf("select users from id [%d]: %w", start, err) + results := make([]Repository, 0, batchSize) + err := x.Where("original_url <> '' AND original_url IS NOT NULL"). + And("original_service_type = 0 OR original_service_type IS NULL"). + OrderBy("id"). + Limit(batchSize, last). + Find(&results) + if err != nil { + return err } - if len(users) == 0 { - _ = sess.Rollback() + if len(results) == 0 { break } + last += len(results) - log.Info("select users [%d - %d]", start, start+len(users)) - start += 50 - - for _, user := range users { - oldAvatar := user.Avatar - - if stat, err := os.Stat(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)); err != nil || !stat.Mode().IsRegular() { - if err == nil { - err = fmt.Errorf("Error: \"%s\" is not a regular file", oldAvatar) - } - log.Warn("[user: %s] os.Stat: %v", user.LowerName, err) - // avatar doesn't exist in the storage - // no need to move avatar and update database - // we can just skip this - continue - } - - newAvatar, err := copyOldAvatarToNewLocation(user.ID, oldAvatar) + for _, res := range results { + u, err := url.Parse(res.OriginalURL) if err != nil { - _ = sess.Rollback() - return fmt.Errorf("[user: %s] %w", user.LowerName, err) - } else if newAvatar == oldAvatar { + // it is ok to continue here, we only care about fixing URLs that we can read continue } - - user.Avatar = newAvatar - if _, err := sess.ID(user.ID).Cols("avatar").Update(user); err != nil { - _ = sess.Rollback() - return fmt.Errorf("[user: %s] user table update: %w", user.LowerName, err) - } - - deleteList.Add(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)) - migrated++ - select { - case <-ticker.C: - log.Info( - "%d/%d (%2.0f%%) User Avatar(s) migrated (%d old avatars to be deleted) in %d batches. 
%d Remaining ...", - migrated, - count, - float64(migrated)/float64(count)*100, - len(deleteList), - int(math.Ceil(float64(migrated)/float64(50))), - count-int64(migrated)) - default: + u.User = nil + originalURL := u.String() + _, err = x.Exec("UPDATE repository SET original_url = ? WHERE id = ?", originalURL, res.ID) + if err != nil { + return err } } - if err := sess.Commit(); err != nil { - _ = sess.Rollback() - return fmt.Errorf("commit session: %w", err) - } } - - deleteCount := len(deleteList) - log.Info("Deleting %d old avatars ...", deleteCount) - i := 0 - for file := range deleteList { - if err := util.Remove(file); err != nil { - log.Warn("util.Remove: %v", err) - } - i++ - select { - case <-ticker.C: - log.Info( - "%d/%d (%2.0f%%) Old User Avatar(s) deleted. %d Remaining ...", - i, - deleteCount, - float64(i)/float64(deleteCount)*100, - deleteCount-i) - default: - } - } - - log.Info("Completed migrating %d User Avatar(s) and deleting %d Old Avatars", count, deleteCount) - return nil } - -// copyOldAvatarToNewLocation copies oldAvatar to newAvatarLocation -// and returns newAvatar location -func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error) { - fr, err := os.Open(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)) - if err != nil { - return "", fmt.Errorf("os.Open: %w", err) - } - defer fr.Close() - - data, err := io.ReadAll(fr) - if err != nil { - return "", fmt.Errorf("io.ReadAll: %w", err) - } - - newAvatar := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data))))) - if newAvatar == oldAvatar { - return newAvatar, nil - } - - if err := os.WriteFile(filepath.Join(setting.Avatar.Storage.Path, newAvatar), data, 0o666); err != nil { - return "", fmt.Errorf("os.WriteFile: %w", err) - } - - return newAvatar, nil -} diff --git a/models/migrations/v1_11/v116.go b/models/migrations/v1_11/v116.go index 73fddd1039510..8c631cfd0bbf3 100644 --- a/models/migrations/v1_11/v116.go +++ b/models/migrations/v1_11/v116.go @@ -4,29 +4,156 @@ package v1_11 //nolint import ( + "crypto/md5" + "fmt" + "io" + "math" + "os" + "path/filepath" + "time" + + "code.gitea.io/gitea/modules/container" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/util" + "xorm.io/xorm" ) -func ExtendTrackedTimes(x *xorm.Engine) error { - type TrackedTime struct { - Time int64 `xorm:"NOT NULL"` - Deleted bool `xorm:"NOT NULL DEFAULT false"` - } - +func RenameExistingUserAvatarName(x *xorm.Engine) error { sess := x.NewSession() defer sess.Close() - if err := sess.Begin(); err != nil { - return err + type User struct { + ID int64 `xorm:"pk autoincr"` + LowerName string `xorm:"UNIQUE NOT NULL"` + Avatar string } - if _, err := sess.Exec("DELETE FROM tracked_time WHERE time IS NULL"); err != nil { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + count, err := x.Count(new(User)) + if err != nil { return err } + log.Info("%d User Avatar(s) to migrate ...", count) - if err := sess.Sync2(new(TrackedTime)); err != nil { - return err + deleteList := make(container.Set[string]) + start := 0 + migrated := 0 + for { + if err := sess.Begin(); err != nil { + return fmt.Errorf("session.Begin: %w", err) + } + users := make([]*User, 0, 50) + if err := sess.Table("user").Asc("id").Limit(50, start).Find(&users); err != nil { + return fmt.Errorf("select users from id [%d]: %w", start, err) + } + if len(users) == 0 { + _ = sess.Rollback() + break + } + + log.Info("select users [%d - %d]", start, start+len(users)) 
+ start += 50 + + for _, user := range users { + oldAvatar := user.Avatar + + if stat, err := os.Stat(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)); err != nil || !stat.Mode().IsRegular() { + if err == nil { + err = fmt.Errorf("Error: \"%s\" is not a regular file", oldAvatar) + } + log.Warn("[user: %s] os.Stat: %v", user.LowerName, err) + // avatar doesn't exist in the storage + // no need to move avatar and update database + // we can just skip this + continue + } + + newAvatar, err := copyOldAvatarToNewLocation(user.ID, oldAvatar) + if err != nil { + _ = sess.Rollback() + return fmt.Errorf("[user: %s] %w", user.LowerName, err) + } else if newAvatar == oldAvatar { + continue + } + + user.Avatar = newAvatar + if _, err := sess.ID(user.ID).Cols("avatar").Update(user); err != nil { + _ = sess.Rollback() + return fmt.Errorf("[user: %s] user table update: %w", user.LowerName, err) + } + + deleteList.Add(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)) + migrated++ + select { + case <-ticker.C: + log.Info( + "%d/%d (%2.0f%%) User Avatar(s) migrated (%d old avatars to be deleted) in %d batches. %d Remaining ...", + migrated, + count, + float64(migrated)/float64(count)*100, + len(deleteList), + int(math.Ceil(float64(migrated)/float64(50))), + count-int64(migrated)) + default: + } + } + if err := sess.Commit(); err != nil { + _ = sess.Rollback() + return fmt.Errorf("commit session: %w", err) + } + } + + deleteCount := len(deleteList) + log.Info("Deleting %d old avatars ...", deleteCount) + i := 0 + for file := range deleteList { + if err := util.Remove(file); err != nil { + log.Warn("util.Remove: %v", err) + } + i++ + select { + case <-ticker.C: + log.Info( + "%d/%d (%2.0f%%) Old User Avatar(s) deleted. %d Remaining ...", + i, + deleteCount, + float64(i)/float64(deleteCount)*100, + deleteCount-i) + default: + } + } + + log.Info("Completed migrating %d User Avatar(s) and deleting %d Old Avatars", count, deleteCount) + + return nil +} + +// copyOldAvatarToNewLocation copies oldAvatar to newAvatarLocation +// and returns newAvatar location +func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error) { + fr, err := os.Open(filepath.Join(setting.Avatar.Storage.Path, oldAvatar)) + if err != nil { + return "", fmt.Errorf("os.Open: %w", err) + } + defer fr.Close() + + data, err := io.ReadAll(fr) + if err != nil { + return "", fmt.Errorf("io.ReadAll: %w", err) + } + + newAvatar := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data))))) + if newAvatar == oldAvatar { + return newAvatar, nil + } + + if err := os.WriteFile(filepath.Join(setting.Avatar.Storage.Path, newAvatar), data, 0o666); err != nil { + return "", fmt.Errorf("os.WriteFile: %w", err) } - return sess.Commit() + return newAvatar, nil } diff --git a/models/migrations/v1_11/v117.go b/models/migrations/v1_11/v117.go new file mode 100644 index 0000000000000..73fddd1039510 --- /dev/null +++ b/models/migrations/v1_11/v117.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_11 //nolint + +import ( + "xorm.io/xorm" +) + +func ExtendTrackedTimes(x *xorm.Engine) error { + type TrackedTime struct { + Time int64 `xorm:"NOT NULL"` + Deleted bool `xorm:"NOT NULL DEFAULT false"` + } + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + if _, err := sess.Exec("DELETE FROM tracked_time WHERE time IS NULL"); err != nil { + return err + } + + if err := sess.Sync2(new(TrackedTime)); err != nil { + return err + } + + return sess.Commit() +} diff --git a/models/migrations/v1_12/v117.go b/models/migrations/v1_12/v117.go deleted file mode 100644 index bc768f3f0bb1d..0000000000000 --- a/models/migrations/v1_12/v117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_12 //nolint - -import ( - "xorm.io/xorm" -) - -func AddBlockOnRejectedReviews(x *xorm.Engine) error { - type ProtectedBranch struct { - BlockOnRejectedReviews bool `xorm:"NOT NULL DEFAULT false"` - } - - return x.Sync2(new(ProtectedBranch)) -} diff --git a/models/migrations/v1_12/v118.go b/models/migrations/v1_12/v118.go index 9b893e2992bb4..bc768f3f0bb1d 100644 --- a/models/migrations/v1_12/v118.go +++ b/models/migrations/v1_12/v118.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. +// Copyright 2020 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_12 //nolint @@ -7,19 +7,10 @@ import ( "xorm.io/xorm" ) -func AddReviewCommitAndStale(x *xorm.Engine) error { - type Review struct { - CommitID string `xorm:"VARCHAR(40)"` - Stale bool `xorm:"NOT NULL DEFAULT false"` - } - +func AddBlockOnRejectedReviews(x *xorm.Engine) error { type ProtectedBranch struct { - DismissStaleApprovals bool `xorm:"NOT NULL DEFAULT false"` + BlockOnRejectedReviews bool `xorm:"NOT NULL DEFAULT false"` } - // Old reviews will have commit ID set to "" and not stale - if err := x.Sync2(new(Review)); err != nil { - return err - } return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_12/v119.go b/models/migrations/v1_12/v119.go index 60bfe6a57da2b..9b893e2992bb4 100644 --- a/models/migrations/v1_12/v119.go +++ b/models/migrations/v1_12/v119.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Gitea Authors. All rights reserved. +// Copyright 2019 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_12 //nolint @@ -7,9 +7,19 @@ import ( "xorm.io/xorm" ) -func FixMigratedRepositoryServiceType(x *xorm.Engine) error { - // structs.GithubService: - // GithubService = 2 - _, err := x.Exec("UPDATE repository SET original_service_type = ? 
WHERE original_url LIKE 'https://github.com/%'", 2) - return err +func AddReviewCommitAndStale(x *xorm.Engine) error { + type Review struct { + CommitID string `xorm:"VARCHAR(40)"` + Stale bool `xorm:"NOT NULL DEFAULT false"` + } + + type ProtectedBranch struct { + DismissStaleApprovals bool `xorm:"NOT NULL DEFAULT false"` + } + + // Old reviews will have commit ID set to "" and not stale + if err := x.Sync2(new(Review)); err != nil { + return err + } + return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_12/v120.go b/models/migrations/v1_12/v120.go index f4e61215eb019..60bfe6a57da2b 100644 --- a/models/migrations/v1_12/v120.go +++ b/models/migrations/v1_12/v120.go @@ -7,13 +7,9 @@ import ( "xorm.io/xorm" ) -func AddOwnerNameOnRepository(x *xorm.Engine) error { - type Repository struct { - OwnerName string - } - if err := x.Sync2(new(Repository)); err != nil { - return err - } - _, err := x.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)") +func FixMigratedRepositoryServiceType(x *xorm.Engine) error { + // structs.GithubService: + // GithubService = 2 + _, err := x.Exec("UPDATE repository SET original_service_type = ? WHERE original_url LIKE 'https://github.com/%'", 2) return err } diff --git a/models/migrations/v1_12/v121.go b/models/migrations/v1_12/v121.go index ac656b2d4225c..f4e61215eb019 100644 --- a/models/migrations/v1_12/v121.go +++ b/models/migrations/v1_12/v121.go @@ -3,14 +3,17 @@ package v1_12 //nolint -import "xorm.io/xorm" +import ( + "xorm.io/xorm" +) -func AddIsRestricted(x *xorm.Engine) error { - // User see models/user.go - type User struct { - ID int64 `xorm:"pk autoincr"` - IsRestricted bool `xorm:"NOT NULL DEFAULT false"` +func AddOwnerNameOnRepository(x *xorm.Engine) error { + type Repository struct { + OwnerName string } - - return x.Sync2(new(User)) + if err := x.Sync2(new(Repository)); err != nil { + return err + } + _, err := x.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)") + return err } diff --git a/models/migrations/v1_12/v122.go b/models/migrations/v1_12/v122.go index 67ac9411faab5..ac656b2d4225c 100644 --- a/models/migrations/v1_12/v122.go +++ b/models/migrations/v1_12/v122.go @@ -3,14 +3,14 @@ package v1_12 //nolint -import ( - "xorm.io/xorm" -) +import "xorm.io/xorm" -func AddRequireSignedCommits(x *xorm.Engine) error { - type ProtectedBranch struct { - RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"` +func AddIsRestricted(x *xorm.Engine) error { + // User see models/user.go + type User struct { + ID int64 `xorm:"pk autoincr"` + IsRestricted bool `xorm:"NOT NULL DEFAULT false"` } - return x.Sync2(new(ProtectedBranch)) + return x.Sync2(new(User)) } diff --git a/models/migrations/v1_12/v123.go b/models/migrations/v1_12/v123.go index ec56161afa6d0..67ac9411faab5 100644 --- a/models/migrations/v1_12/v123.go +++ b/models/migrations/v1_12/v123.go @@ -7,11 +7,10 @@ import ( "xorm.io/xorm" ) -func AddReactionOriginals(x *xorm.Engine) error { - type Reaction struct { - OriginalAuthorID int64 `xorm:"INDEX NOT NULL DEFAULT(0)"` - OriginalAuthor string +func AddRequireSignedCommits(x *xorm.Engine) error { + type ProtectedBranch struct { + RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"` } - return x.Sync2(new(Reaction)) + return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_12/v124.go b/models/migrations/v1_12/v124.go index 311690c039b3c..ec56161afa6d0 100644 --- a/models/migrations/v1_12/v124.go 
+++ b/models/migrations/v1_12/v124.go @@ -7,17 +7,11 @@ import ( "xorm.io/xorm" ) -func AddUserRepoMissingColumns(x *xorm.Engine) error { - type VisibleType int - type User struct { - PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'pbkdf2'"` - Visibility VisibleType `xorm:"NOT NULL DEFAULT 0"` +func AddReactionOriginals(x *xorm.Engine) error { + type Reaction struct { + OriginalAuthorID int64 `xorm:"INDEX NOT NULL DEFAULT(0)"` + OriginalAuthor string } - type Repository struct { - IsArchived bool `xorm:"INDEX"` - Topics []string `xorm:"TEXT JSON"` - } - - return x.Sync2(new(User), new(Repository)) + return x.Sync2(new(Reaction)) } diff --git a/models/migrations/v1_12/v125.go b/models/migrations/v1_12/v125.go index 5540dfd626fb9..311690c039b3c 100644 --- a/models/migrations/v1_12/v125.go +++ b/models/migrations/v1_12/v125.go @@ -4,19 +4,20 @@ package v1_12 //nolint import ( - "fmt" - "xorm.io/xorm" ) -func AddReviewMigrateInfo(x *xorm.Engine) error { - type Review struct { - OriginalAuthor string - OriginalAuthorID int64 +func AddUserRepoMissingColumns(x *xorm.Engine) error { + type VisibleType int + type User struct { + PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'pbkdf2'"` + Visibility VisibleType `xorm:"NOT NULL DEFAULT 0"` } - if err := x.Sync2(new(Review)); err != nil { - return fmt.Errorf("Sync2: %w", err) + type Repository struct { + IsArchived bool `xorm:"INDEX"` + Topics []string `xorm:"TEXT JSON"` } - return nil + + return x.Sync2(new(User), new(Repository)) } diff --git a/models/migrations/v1_12/v126.go b/models/migrations/v1_12/v126.go index ca9ec3aa3f340..5540dfd626fb9 100644 --- a/models/migrations/v1_12/v126.go +++ b/models/migrations/v1_12/v126.go @@ -4,21 +4,19 @@ package v1_12 //nolint import ( - "xorm.io/builder" + "fmt" + "xorm.io/xorm" ) -func FixTopicRepositoryCount(x *xorm.Engine) error { - _, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_topic`")) - if err != nil { - return err +func AddReviewMigrateInfo(x *xorm.Engine) error { + type Review struct { + OriginalAuthor string + OriginalAuthorID int64 } - _, err = x.Exec(builder.Update( - builder.Eq{ - "`repo_count`": builder.Select("count(*)").From("`repo_topic`").Where(builder.Eq{ - "`repo_topic`.`topic_id`": builder.Expr("`topic`.`id`"), - }), - }).From("`topic`").Where(builder.Eq{"'1'": "1"})) - return err + if err := x.Sync2(new(Review)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + return nil } diff --git a/models/migrations/v1_12/v127.go b/models/migrations/v1_12/v127.go index b248eb4c06cbd..ca9ec3aa3f340 100644 --- a/models/migrations/v1_12/v127.go +++ b/models/migrations/v1_12/v127.go @@ -4,41 +4,21 @@ package v1_12 //nolint import ( - "fmt" - - "code.gitea.io/gitea/modules/timeutil" - + "xorm.io/builder" "xorm.io/xorm" ) -func AddLanguageStats(x *xorm.Engine) error { - // LanguageStat see models/repo_language_stats.go - type LanguageStat struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - CommitID string - IsPrimary bool - Language string `xorm:"VARCHAR(30) UNIQUE(s) INDEX NOT NULL"` - Percentage float32 `xorm:"NUMERIC(5,2) NOT NULL DEFAULT 0"` - Color string `xorm:"-"` - CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"` +func FixTopicRepositoryCount(x *xorm.Engine) error { + _, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_topic`")) + if err != nil { + return err } - type RepoIndexerType int - - // RepoIndexerStatus see 
models/repo_stats_indexer.go - type RepoIndexerStatus struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX(s)"` - CommitSha string `xorm:"VARCHAR(40)"` - IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"` - } - - if err := x.Sync2(new(LanguageStat)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - if err := x.Sync2(new(RepoIndexerStatus)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return nil + _, err = x.Exec(builder.Update( + builder.Eq{ + "`repo_count`": builder.Select("count(*)").From("`repo_topic`").Where(builder.Eq{ + "`repo_topic`.`topic_id`": builder.Expr("`topic`.`id`"), + }), + }).From("`topic`").Where(builder.Eq{"'1'": "1"})) + return err } diff --git a/models/migrations/v1_12/v128.go b/models/migrations/v1_12/v128.go index 44d44a26c549f..b248eb4c06cbd 100644 --- a/models/migrations/v1_12/v128.go +++ b/models/migrations/v1_12/v128.go @@ -5,123 +5,40 @@ package v1_12 //nolint import ( "fmt" - "math" - "path/filepath" - "strings" - "time" - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func FixMergeBase(x *xorm.Engine) error { - type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) index"` - OwnerName string - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - Name string `xorm:"INDEX NOT NULL"` +func AddLanguageStats(x *xorm.Engine) error { + // LanguageStat see models/repo_language_stats.go + type LanguageStat struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + CommitID string + IsPrimary bool + Language string `xorm:"VARCHAR(30) UNIQUE(s) INDEX NOT NULL"` + Percentage float32 `xorm:"NUMERIC(5,2) NOT NULL DEFAULT 0"` + Color string `xorm:"-"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"` } - type PullRequest struct { - ID int64 `xorm:"pk autoincr"` - Index int64 - HeadRepoID int64 `xorm:"INDEX"` - BaseRepoID int64 `xorm:"INDEX"` - HeadBranch string - BaseBranch string - MergeBase string `xorm:"VARCHAR(40)"` + type RepoIndexerType int - HasMerged bool `xorm:"INDEX"` - MergedCommitID string `xorm:"VARCHAR(40)"` + // RepoIndexerStatus see models/repo_stats_indexer.go + type RepoIndexerStatus struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX(s)"` + CommitSha string `xorm:"VARCHAR(40)"` + IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"` } - limit := setting.Database.IterateBufferSize - if limit <= 0 { - limit = 50 + if err := x.Sync2(new(LanguageStat)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - - count, err := x.Count(new(PullRequest)) - if err != nil { - return err - } - log.Info("%d Pull Request(s) to migrate ...", count) - - i := 0 - start := 0 - for { - prs := make([]PullRequest, 0, 50) - if err := x.Limit(limit, start).Asc("id").Find(&prs); err != nil { - return fmt.Errorf("Find: %w", err) - } - if len(prs) == 0 { - break - } - - start += 50 - for _, pr := range prs { - baseRepo := &Repository{ID: pr.BaseRepoID} - has, err := x.Table("repository").Get(baseRepo) - if err != nil { - return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err) - } - if !has { - log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID) - continue - } - userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName)) - repoPath := filepath.Join(userPath, 
strings.ToLower(baseRepo.Name)+".git") - - gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index) - - if !pr.HasMerged { - var err error - pr.MergeBase, _, err = git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(pr.BaseBranch, gitRefName).RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil { - var err2 error - pr.MergeBase, _, err2 = git.NewCommand(git.DefaultContext, "rev-parse").AddDynamicArguments(git.BranchPrefix + pr.BaseBranch).RunStdString(&git.RunOpts{Dir: repoPath}) - if err2 != nil { - log.Error("Unable to get merge base for PR ID %d, Index %d in %s/%s. Error: %v & %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err, err2) - continue - } - } - } else { - parentsString, _, err := git.NewCommand(git.DefaultContext, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil { - log.Error("Unable to get parents for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) - continue - } - parents := strings.Split(strings.TrimSpace(parentsString), " ") - if len(parents) < 2 { - continue - } - - refs := append([]string{}, parents[1:]...) - refs = append(refs, gitRefName) - cmd := git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(refs...) - - pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil { - log.Error("Unable to get merge base for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) - continue - } - } - pr.MergeBase = strings.TrimSpace(pr.MergeBase) - x.ID(pr.ID).Cols("merge_base").Update(pr) - i++ - select { - case <-ticker.C: - log.Info("%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. 
%d PRs Remaining ...", i, count, float64(i)/float64(count)*100, int(math.Ceil(float64(i)/float64(limit))), count-int64(i)) - default: - } - } + if err := x.Sync2(new(RepoIndexerStatus)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(i)/float64(limit)))) return nil } diff --git a/models/migrations/v1_12/v129.go b/models/migrations/v1_12/v129.go index cf228242b9dfd..44d44a26c549f 100644 --- a/models/migrations/v1_12/v129.go +++ b/models/migrations/v1_12/v129.go @@ -4,13 +4,124 @@ package v1_12 //nolint import ( + "fmt" + "math" + "path/filepath" + "strings" + "time" + + "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func PurgeUnusedDependencies(x *xorm.Engine) error { - if _, err := x.Exec("DELETE FROM issue_dependency WHERE issue_id NOT IN (SELECT id FROM issue)"); err != nil { +func FixMergeBase(x *xorm.Engine) error { + type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) index"` + OwnerName string + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + Name string `xorm:"INDEX NOT NULL"` + } + + type PullRequest struct { + ID int64 `xorm:"pk autoincr"` + Index int64 + HeadRepoID int64 `xorm:"INDEX"` + BaseRepoID int64 `xorm:"INDEX"` + HeadBranch string + BaseBranch string + MergeBase string `xorm:"VARCHAR(40)"` + + HasMerged bool `xorm:"INDEX"` + MergedCommitID string `xorm:"VARCHAR(40)"` + } + + limit := setting.Database.IterateBufferSize + if limit <= 0 { + limit = 50 + } + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + count, err := x.Count(new(PullRequest)) + if err != nil { return err } - _, err := x.Exec("DELETE FROM issue_dependency WHERE dependency_id NOT IN (SELECT id FROM issue)") - return err + log.Info("%d Pull Request(s) to migrate ...", count) + + i := 0 + start := 0 + for { + prs := make([]PullRequest, 0, 50) + if err := x.Limit(limit, start).Asc("id").Find(&prs); err != nil { + return fmt.Errorf("Find: %w", err) + } + if len(prs) == 0 { + break + } + + start += 50 + for _, pr := range prs { + baseRepo := &Repository{ID: pr.BaseRepoID} + has, err := x.Table("repository").Get(baseRepo) + if err != nil { + return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err) + } + if !has { + log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID) + continue + } + userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName)) + repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git") + + gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index) + + if !pr.HasMerged { + var err error + pr.MergeBase, _, err = git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(pr.BaseBranch, gitRefName).RunStdString(&git.RunOpts{Dir: repoPath}) + if err != nil { + var err2 error + pr.MergeBase, _, err2 = git.NewCommand(git.DefaultContext, "rev-parse").AddDynamicArguments(git.BranchPrefix + pr.BaseBranch).RunStdString(&git.RunOpts{Dir: repoPath}) + if err2 != nil { + log.Error("Unable to get merge base for PR ID %d, Index %d in %s/%s. 
Error: %v & %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err, err2) + continue + } + } + } else { + parentsString, _, err := git.NewCommand(git.DefaultContext, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath}) + if err != nil { + log.Error("Unable to get parents for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) + continue + } + parents := strings.Split(strings.TrimSpace(parentsString), " ") + if len(parents) < 2 { + continue + } + + refs := append([]string{}, parents[1:]...) + refs = append(refs, gitRefName) + cmd := git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(refs...) + + pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath}) + if err != nil { + log.Error("Unable to get merge base for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) + continue + } + } + pr.MergeBase = strings.TrimSpace(pr.MergeBase) + x.ID(pr.ID).Cols("merge_base").Update(pr) + i++ + select { + case <-ticker.C: + log.Info("%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...", i, count, float64(i)/float64(count)*100, int(math.Ceil(float64(i)/float64(limit))), count-int64(i)) + default: + } + } + } + log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(i)/float64(limit)))) + return nil } diff --git a/models/migrations/v1_12/v130.go b/models/migrations/v1_12/v130.go index 391810c7cadea..cf228242b9dfd 100644 --- a/models/migrations/v1_12/v130.go +++ b/models/migrations/v1_12/v130.go @@ -4,108 +4,13 @@ package v1_12 //nolint import ( - "code.gitea.io/gitea/modules/json" - "code.gitea.io/gitea/modules/setting" - "xorm.io/xorm" ) -func ExpandWebhooks(x *xorm.Engine) error { - type HookEvents struct { - Create bool `json:"create"` - Delete bool `json:"delete"` - Fork bool `json:"fork"` - Issues bool `json:"issues"` - IssueAssign bool `json:"issue_assign"` - IssueLabel bool `json:"issue_label"` - IssueMilestone bool `json:"issue_milestone"` - IssueComment bool `json:"issue_comment"` - Push bool `json:"push"` - PullRequest bool `json:"pull_request"` - PullRequestAssign bool `json:"pull_request_assign"` - PullRequestLabel bool `json:"pull_request_label"` - PullRequestMilestone bool `json:"pull_request_milestone"` - PullRequestComment bool `json:"pull_request_comment"` - PullRequestReview bool `json:"pull_request_review"` - PullRequestSync bool `json:"pull_request_sync"` - Repository bool `json:"repository"` - Release bool `json:"release"` - } - - type HookEvent struct { - PushOnly bool `json:"push_only"` - SendEverything bool `json:"send_everything"` - ChooseEvents bool `json:"choose_events"` - BranchFilter string `json:"branch_filter"` - - HookEvents `json:"events"` - } - - type Webhook struct { - ID int64 - Events string - } - - var bytes []byte - var last int - batchSize := setting.Database.IterateBufferSize - sess := x.NewSession() - defer sess.Close() - for { - if err := sess.Begin(); err != nil { - return err - } - results := make([]Webhook, 0, batchSize) - err := x.OrderBy("id"). - Limit(batchSize, last). 
- Find(&results) - if err != nil { - return err - } - if len(results) == 0 { - break - } - last += len(results) - - for _, res := range results { - var events HookEvent - if err = json.Unmarshal([]byte(res.Events), &events); err != nil { - return err - } - - if !events.ChooseEvents { - continue - } - - if events.Issues { - events.IssueAssign = true - events.IssueLabel = true - events.IssueMilestone = true - events.IssueComment = true - } - - if events.PullRequest { - events.PullRequestAssign = true - events.PullRequestLabel = true - events.PullRequestMilestone = true - events.PullRequestComment = true - events.PullRequestReview = true - events.PullRequestSync = true - } - - if bytes, err = json.Marshal(&events); err != nil { - return err - } - - _, err = sess.Exec("UPDATE webhook SET events = ? WHERE id = ?", string(bytes), res.ID) - if err != nil { - return err - } - } - - if err := sess.Commit(); err != nil { - return err - } +func PurgeUnusedDependencies(x *xorm.Engine) error { + if _, err := x.Exec("DELETE FROM issue_dependency WHERE issue_id NOT IN (SELECT id FROM issue)"); err != nil { + return err } - return nil + _, err := x.Exec("DELETE FROM issue_dependency WHERE dependency_id NOT IN (SELECT id FROM issue)") + return err } diff --git a/models/migrations/v1_12/v131.go b/models/migrations/v1_12/v131.go index a734d9fbf8f97..391810c7cadea 100644 --- a/models/migrations/v1_12/v131.go +++ b/models/migrations/v1_12/v131.go @@ -4,18 +4,108 @@ package v1_12 //nolint import ( - "fmt" + "code.gitea.io/gitea/modules/json" + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func AddSystemWebhookColumn(x *xorm.Engine) error { +func ExpandWebhooks(x *xorm.Engine) error { + type HookEvents struct { + Create bool `json:"create"` + Delete bool `json:"delete"` + Fork bool `json:"fork"` + Issues bool `json:"issues"` + IssueAssign bool `json:"issue_assign"` + IssueLabel bool `json:"issue_label"` + IssueMilestone bool `json:"issue_milestone"` + IssueComment bool `json:"issue_comment"` + Push bool `json:"push"` + PullRequest bool `json:"pull_request"` + PullRequestAssign bool `json:"pull_request_assign"` + PullRequestLabel bool `json:"pull_request_label"` + PullRequestMilestone bool `json:"pull_request_milestone"` + PullRequestComment bool `json:"pull_request_comment"` + PullRequestReview bool `json:"pull_request_review"` + PullRequestSync bool `json:"pull_request_sync"` + Repository bool `json:"repository"` + Release bool `json:"release"` + } + + type HookEvent struct { + PushOnly bool `json:"push_only"` + SendEverything bool `json:"send_everything"` + ChooseEvents bool `json:"choose_events"` + BranchFilter string `json:"branch_filter"` + + HookEvents `json:"events"` + } + type Webhook struct { - IsSystemWebhook bool `xorm:"NOT NULL DEFAULT false"` + ID int64 + Events string } - if err := x.Sync2(new(Webhook)); err != nil { - return fmt.Errorf("Sync2: %w", err) + var bytes []byte + var last int + batchSize := setting.Database.IterateBufferSize + sess := x.NewSession() + defer sess.Close() + for { + if err := sess.Begin(); err != nil { + return err + } + results := make([]Webhook, 0, batchSize) + err := x.OrderBy("id"). + Limit(batchSize, last). 
+ Find(&results) + if err != nil { + return err + } + if len(results) == 0 { + break + } + last += len(results) + + for _, res := range results { + var events HookEvent + if err = json.Unmarshal([]byte(res.Events), &events); err != nil { + return err + } + + if !events.ChooseEvents { + continue + } + + if events.Issues { + events.IssueAssign = true + events.IssueLabel = true + events.IssueMilestone = true + events.IssueComment = true + } + + if events.PullRequest { + events.PullRequestAssign = true + events.PullRequestLabel = true + events.PullRequestMilestone = true + events.PullRequestComment = true + events.PullRequestReview = true + events.PullRequestSync = true + } + + if bytes, err = json.Marshal(&events); err != nil { + return err + } + + _, err = sess.Exec("UPDATE webhook SET events = ? WHERE id = ?", string(bytes), res.ID) + if err != nil { + return err + } + } + + if err := sess.Commit(); err != nil { + return err + } } return nil } diff --git a/models/migrations/v1_12/v132.go b/models/migrations/v1_12/v132.go index 1708a57a7e4cf..a734d9fbf8f97 100644 --- a/models/migrations/v1_12/v132.go +++ b/models/migrations/v1_12/v132.go @@ -9,12 +9,12 @@ import ( "xorm.io/xorm" ) -func AddBranchProtectionProtectedFilesColumn(x *xorm.Engine) error { - type ProtectedBranch struct { - ProtectedFilePatterns string `xorm:"TEXT"` +func AddSystemWebhookColumn(x *xorm.Engine) error { + type Webhook struct { + IsSystemWebhook bool `xorm:"NOT NULL DEFAULT false"` } - if err := x.Sync2(new(ProtectedBranch)); err != nil { + if err := x.Sync2(new(Webhook)); err != nil { return fmt.Errorf("Sync2: %w", err) } return nil diff --git a/models/migrations/v1_12/v133.go b/models/migrations/v1_12/v133.go index e1c392459b8e6..1708a57a7e4cf 100644 --- a/models/migrations/v1_12/v133.go +++ b/models/migrations/v1_12/v133.go @@ -3,13 +3,19 @@ package v1_12 //nolint -import "xorm.io/xorm" +import ( + "fmt" -func AddEmailHashTable(x *xorm.Engine) error { - // EmailHash represents a pre-generated hash map - type EmailHash struct { - Hash string `xorm:"pk varchar(32)"` - Email string `xorm:"UNIQUE NOT NULL"` + "xorm.io/xorm" +) + +func AddBranchProtectionProtectedFilesColumn(x *xorm.Engine) error { + type ProtectedBranch struct { + ProtectedFilePatterns string `xorm:"TEXT"` + } + + if err := x.Sync2(new(ProtectedBranch)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - return x.Sync2(new(EmailHash)) + return nil } diff --git a/models/migrations/v1_12/v134.go b/models/migrations/v1_12/v134.go index 3d1c82f09e164..e1c392459b8e6 100644 --- a/models/migrations/v1_12/v134.go +++ b/models/migrations/v1_12/v134.go @@ -3,113 +3,13 @@ package v1_12 //nolint -import ( - "fmt" - "math" - "path/filepath" - "strings" - "time" +import "xorm.io/xorm" - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" - - "xorm.io/xorm" -) - -func RefixMergeBase(x *xorm.Engine) error { - type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) index"` - OwnerName string - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - Name string `xorm:"INDEX NOT NULL"` - } - - type PullRequest struct { - ID int64 `xorm:"pk autoincr"` - Index int64 - HeadRepoID int64 `xorm:"INDEX"` - BaseRepoID int64 `xorm:"INDEX"` - HeadBranch string - BaseBranch string - MergeBase string `xorm:"VARCHAR(40)"` - - HasMerged bool `xorm:"INDEX"` - MergedCommitID string `xorm:"VARCHAR(40)"` - } - - limit := setting.Database.IterateBufferSize - if limit <= 0 { - limit = 50 - } - - 
ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - count, err := x.Where("has_merged = ?", true).Count(new(PullRequest)) - if err != nil { - return err +func AddEmailHashTable(x *xorm.Engine) error { + // EmailHash represents a pre-generated hash map + type EmailHash struct { + Hash string `xorm:"pk varchar(32)"` + Email string `xorm:"UNIQUE NOT NULL"` } - log.Info("%d Merged Pull Request(s) to migrate ...", count) - - i := 0 - start := 0 - for { - prs := make([]PullRequest, 0, 50) - if err := x.Limit(limit, start).Asc("id").Where("has_merged = ?", true).Find(&prs); err != nil { - return fmt.Errorf("Find: %w", err) - } - if len(prs) == 0 { - break - } - - start += 50 - for _, pr := range prs { - baseRepo := &Repository{ID: pr.BaseRepoID} - has, err := x.Table("repository").Get(baseRepo) - if err != nil { - return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err) - } - if !has { - log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID) - continue - } - userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName)) - repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git") - - gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index) - - parentsString, _, err := git.NewCommand(git.DefaultContext, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil { - log.Error("Unable to get parents for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) - continue - } - parents := strings.Split(strings.TrimSpace(parentsString), " ") - if len(parents) < 3 { - continue - } - - // we should recalculate - refs := append([]string{}, parents[1:]...) - refs = append(refs, gitRefName) - cmd := git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(refs...) - - pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath}) - if err != nil { - log.Error("Unable to get merge base for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) - continue - } - pr.MergeBase = strings.TrimSpace(pr.MergeBase) - x.ID(pr.ID).Cols("merge_base").Update(pr) - i++ - select { - case <-ticker.C: - log.Info("%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. 
%d PRs Remaining ...", i, count, float64(i)/float64(count)*100, int(math.Ceil(float64(i)/float64(limit))), count-int64(i)) - default: - } - } - } - - log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(i)/float64(limit)))) - return nil + return x.Sync2(new(EmailHash)) } diff --git a/models/migrations/v1_12/v135.go b/models/migrations/v1_12/v135.go index 5a87d9911b38f..3d1c82f09e164 100644 --- a/models/migrations/v1_12/v135.go +++ b/models/migrations/v1_12/v135.go @@ -5,17 +5,111 @@ package v1_12 //nolint import ( "fmt" + "math" + "path/filepath" + "strings" + "time" + + "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func AddOrgIDLabelColumn(x *xorm.Engine) error { - type Label struct { - OrgID int64 `xorm:"INDEX"` +func RefixMergeBase(x *xorm.Engine) error { + type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) index"` + OwnerName string + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + Name string `xorm:"INDEX NOT NULL"` + } + + type PullRequest struct { + ID int64 `xorm:"pk autoincr"` + Index int64 + HeadRepoID int64 `xorm:"INDEX"` + BaseRepoID int64 `xorm:"INDEX"` + HeadBranch string + BaseBranch string + MergeBase string `xorm:"VARCHAR(40)"` + + HasMerged bool `xorm:"INDEX"` + MergedCommitID string `xorm:"VARCHAR(40)"` + } + + limit := setting.Database.IterateBufferSize + if limit <= 0 { + limit = 50 + } + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + count, err := x.Where("has_merged = ?", true).Count(new(PullRequest)) + if err != nil { + return err } + log.Info("%d Merged Pull Request(s) to migrate ...", count) + + i := 0 + start := 0 + for { + prs := make([]PullRequest, 0, 50) + if err := x.Limit(limit, start).Asc("id").Where("has_merged = ?", true).Find(&prs); err != nil { + return fmt.Errorf("Find: %w", err) + } + if len(prs) == 0 { + break + } + + start += 50 + for _, pr := range prs { + baseRepo := &Repository{ID: pr.BaseRepoID} + has, err := x.Table("repository").Get(baseRepo) + if err != nil { + return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err) + } + if !has { + log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID) + continue + } + userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName)) + repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git") + + gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index) - if err := x.Sync2(new(Label)); err != nil { - return fmt.Errorf("Sync2: %w", err) + parentsString, _, err := git.NewCommand(git.DefaultContext, "rev-list", "--parents", "-n", "1").AddDynamicArguments(pr.MergedCommitID).RunStdString(&git.RunOpts{Dir: repoPath}) + if err != nil { + log.Error("Unable to get parents for merged PR ID %d, Index %d in %s/%s. Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) + continue + } + parents := strings.Split(strings.TrimSpace(parentsString), " ") + if len(parents) < 3 { + continue + } + + // we should recalculate + refs := append([]string{}, parents[1:]...) + refs = append(refs, gitRefName) + cmd := git.NewCommand(git.DefaultContext, "merge-base").AddDashesAndList(refs...) + + pr.MergeBase, _, err = cmd.RunStdString(&git.RunOpts{Dir: repoPath}) + if err != nil { + log.Error("Unable to get merge base for merged PR ID %d, Index %d in %s/%s. 
Error: %v", pr.ID, pr.Index, baseRepo.OwnerName, baseRepo.Name, err) + continue + } + pr.MergeBase = strings.TrimSpace(pr.MergeBase) + x.ID(pr.ID).Cols("merge_base").Update(pr) + i++ + select { + case <-ticker.C: + log.Info("%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...", i, count, float64(i)/float64(count)*100, int(math.Ceil(float64(i)/float64(limit))), count-int64(i)) + default: + } + } } + + log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(i)/float64(limit)))) return nil } diff --git a/models/migrations/v1_12/v136.go b/models/migrations/v1_12/v136.go index 0cecba7be9395..5a87d9911b38f 100644 --- a/models/migrations/v1_12/v136.go +++ b/models/migrations/v1_12/v136.go @@ -5,121 +5,17 @@ package v1_12 //nolint import ( "fmt" - "math" - "path/filepath" - "strings" - "time" - - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/graceful" - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func AddCommitDivergenceToPulls(x *xorm.Engine) error { - type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) index"` - OwnerName string - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - Name string `xorm:"INDEX NOT NULL"` +func AddOrgIDLabelColumn(x *xorm.Engine) error { + type Label struct { + OrgID int64 `xorm:"INDEX"` } - type PullRequest struct { - ID int64 `xorm:"pk autoincr"` - IssueID int64 `xorm:"INDEX"` - Index int64 - - CommitsAhead int - CommitsBehind int - - BaseRepoID int64 `xorm:"INDEX"` - BaseBranch string - - HasMerged bool `xorm:"INDEX"` - MergedCommitID string `xorm:"VARCHAR(40)"` - } - - if err := x.Sync2(new(PullRequest)); err != nil { + if err := x.Sync2(new(Label)); err != nil { return fmt.Errorf("Sync2: %w", err) } - - last := 0 - migrated := 0 - - batchSize := setting.Database.IterateBufferSize - sess := x.NewSession() - defer sess.Close() - - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - count, err := sess.Where("has_merged = ?", false).Count(new(PullRequest)) - if err != nil { - return err - } - log.Info("%d Unmerged Pull Request(s) to migrate ...", count) - - for { - if err := sess.Begin(); err != nil { - return err - } - results := make([]*PullRequest, 0, batchSize) - err := sess.Where("has_merged = ?", false).OrderBy("id").Limit(batchSize, last).Find(&results) - if err != nil { - return err - } - if len(results) == 0 { - break - } - last += batchSize - - for _, pr := range results { - baseRepo := &Repository{ID: pr.BaseRepoID} - has, err := x.Table("repository").Get(baseRepo) - if err != nil { - return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err) - } - if !has { - log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID) - continue - } - userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName)) - repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git") - - gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index) - - divergence, err := git.GetDivergingCommits(graceful.GetManager().HammerContext(), repoPath, pr.BaseBranch, gitRefName) - if err != nil { - log.Warn("Could not recalculate Divergence for pull: %d", pr.ID) - pr.CommitsAhead = 0 - pr.CommitsBehind = 0 - } - pr.CommitsAhead = divergence.Ahead - pr.CommitsBehind = divergence.Behind - - if _, err = sess.ID(pr.ID).Cols("commits_ahead", "commits_behind").Update(pr); err != nil { - return fmt.Errorf("Update Cols: %w", err) - } - migrated++ - } - - if 
err := sess.Commit(); err != nil { - return err - } - select { - case <-ticker.C: - log.Info( - "%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...", - migrated, - count, - float64(migrated)/float64(count)*100, - int(math.Ceil(float64(migrated)/float64(batchSize))), - count-int64(migrated)) - default: - } - } - log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(migrated)/float64(batchSize)))) return nil } diff --git a/models/migrations/v1_12/v137.go b/models/migrations/v1_12/v137.go index 371b1a3fdb85d..0cecba7be9395 100644 --- a/models/migrations/v1_12/v137.go +++ b/models/migrations/v1_12/v137.go @@ -4,12 +4,122 @@ package v1_12 //nolint import ( + "fmt" + "math" + "path/filepath" + "strings" + "time" + + "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/graceful" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func AddBlockOnOutdatedBranch(x *xorm.Engine) error { - type ProtectedBranch struct { - BlockOnOutdatedBranch bool `xorm:"NOT NULL DEFAULT false"` +func AddCommitDivergenceToPulls(x *xorm.Engine) error { + type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) index"` + OwnerName string + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + Name string `xorm:"INDEX NOT NULL"` + } + + type PullRequest struct { + ID int64 `xorm:"pk autoincr"` + IssueID int64 `xorm:"INDEX"` + Index int64 + + CommitsAhead int + CommitsBehind int + + BaseRepoID int64 `xorm:"INDEX"` + BaseBranch string + + HasMerged bool `xorm:"INDEX"` + MergedCommitID string `xorm:"VARCHAR(40)"` + } + + if err := x.Sync2(new(PullRequest)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + + last := 0 + migrated := 0 + + batchSize := setting.Database.IterateBufferSize + sess := x.NewSession() + defer sess.Close() + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + count, err := sess.Where("has_merged = ?", false).Count(new(PullRequest)) + if err != nil { + return err + } + log.Info("%d Unmerged Pull Request(s) to migrate ...", count) + + for { + if err := sess.Begin(); err != nil { + return err + } + results := make([]*PullRequest, 0, batchSize) + err := sess.Where("has_merged = ?", false).OrderBy("id").Limit(batchSize, last).Find(&results) + if err != nil { + return err + } + if len(results) == 0 { + break + } + last += batchSize + + for _, pr := range results { + baseRepo := &Repository{ID: pr.BaseRepoID} + has, err := x.Table("repository").Get(baseRepo) + if err != nil { + return fmt.Errorf("Unable to get base repo %d %w", pr.BaseRepoID, err) + } + if !has { + log.Error("Missing base repo with id %d for PR ID %d", pr.BaseRepoID, pr.ID) + continue + } + userPath := filepath.Join(setting.RepoRootPath, strings.ToLower(baseRepo.OwnerName)) + repoPath := filepath.Join(userPath, strings.ToLower(baseRepo.Name)+".git") + + gitRefName := fmt.Sprintf("refs/pull/%d/head", pr.Index) + + divergence, err := git.GetDivergingCommits(graceful.GetManager().HammerContext(), repoPath, pr.BaseBranch, gitRefName) + if err != nil { + log.Warn("Could not recalculate Divergence for pull: %d", pr.ID) + pr.CommitsAhead = 0 + pr.CommitsBehind = 0 + } + pr.CommitsAhead = divergence.Ahead + pr.CommitsBehind = divergence.Behind + + if _, err = sess.ID(pr.ID).Cols("commits_ahead", "commits_behind").Update(pr); err != nil { + return fmt.Errorf("Update Cols: %w", err) + } + migrated++ + } + + if err := sess.Commit(); err != nil { + return err + } + select { + 
case <-ticker.C: + log.Info( + "%d/%d (%2.0f%%) Pull Request(s) migrated in %d batches. %d PRs Remaining ...", + migrated, + count, + float64(migrated)/float64(count)*100, + int(math.Ceil(float64(migrated)/float64(batchSize))), + count-int64(migrated)) + default: + } } - return x.Sync2(new(ProtectedBranch)) + log.Info("Completed migrating %d Pull Request(s) in: %d batches", count, int(math.Ceil(float64(migrated)/float64(batchSize)))) + return nil } diff --git a/models/migrations/v1_12/v138.go b/models/migrations/v1_12/v138.go index a2bfdb59a85cb..371b1a3fdb85d 100644 --- a/models/migrations/v1_12/v138.go +++ b/models/migrations/v1_12/v138.go @@ -4,18 +4,12 @@ package v1_12 //nolint import ( - "fmt" - "xorm.io/xorm" ) -func AddResolveDoerIDCommentColumn(x *xorm.Engine) error { - type Comment struct { - ResolveDoerID int64 - } - - if err := x.Sync2(new(Comment)); err != nil { - return fmt.Errorf("Sync2: %w", err) +func AddBlockOnOutdatedBranch(x *xorm.Engine) error { + type ProtectedBranch struct { + BlockOnOutdatedBranch bool `xorm:"NOT NULL DEFAULT false"` } - return nil + return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_12/v139.go b/models/migrations/v1_12/v139.go index 279aa7df87dc4..a2bfdb59a85cb 100644 --- a/models/migrations/v1_12/v139.go +++ b/models/migrations/v1_12/v139.go @@ -1,26 +1,21 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. +// Copyright 2020 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_12 //nolint import ( - "code.gitea.io/gitea/modules/setting" + "fmt" "xorm.io/xorm" ) -func PrependRefsHeadsToIssueRefs(x *xorm.Engine) error { - var query string - - switch { - case setting.Database.Type.IsMSSQL(): - query = "UPDATE `issue` SET `ref` = 'refs/heads/' + `ref` WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%'" - case setting.Database.Type.IsMySQL(): - query = "UPDATE `issue` SET `ref` = CONCAT('refs/heads/', `ref`) WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%';" - default: - query = "UPDATE `issue` SET `ref` = 'refs/heads/' || `ref` WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%'" +func AddResolveDoerIDCommentColumn(x *xorm.Engine) error { + type Comment struct { + ResolveDoerID int64 } - _, err := x.Exec(query) - return err + if err := x.Sync2(new(Comment)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + return nil } diff --git a/models/migrations/v1_12/v140.go b/models/migrations/v1_12/v140.go new file mode 100644 index 0000000000000..279aa7df87dc4 --- /dev/null +++ b/models/migrations/v1_12/v140.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_12 //nolint + +import ( + "code.gitea.io/gitea/modules/setting" + + "xorm.io/xorm" +) + +func PrependRefsHeadsToIssueRefs(x *xorm.Engine) error { + var query string + + switch { + case setting.Database.Type.IsMSSQL(): + query = "UPDATE `issue` SET `ref` = 'refs/heads/' + `ref` WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%'" + case setting.Database.Type.IsMySQL(): + query = "UPDATE `issue` SET `ref` = CONCAT('refs/heads/', `ref`) WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%';" + default: + query = "UPDATE `issue` SET `ref` = 'refs/heads/' || `ref` WHERE `ref` IS NOT NULL AND `ref` <> '' AND `ref` NOT LIKE 'refs/%'" + } + + _, err := x.Exec(query) + return err +} diff --git a/models/migrations/v1_13/v140.go b/models/migrations/v1_13/v140.go deleted file mode 100644 index 30c1bc07d22fc..0000000000000 --- a/models/migrations/v1_13/v140.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2020 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_13 //nolint - -import ( - "fmt" - - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/setting" - - "xorm.io/xorm" -) - -func FixLanguageStatsToSaveSize(x *xorm.Engine) error { - // LanguageStat see models/repo_language_stats.go - type LanguageStat struct { - Size int64 `xorm:"NOT NULL DEFAULT 0"` - } - - // RepoIndexerType specifies the repository indexer type - type RepoIndexerType int - - const ( - // RepoIndexerTypeCode code indexer - 0 - RepoIndexerTypeCode RepoIndexerType = iota //nolint:unused - // RepoIndexerTypeStats repository stats indexer - 1 - RepoIndexerTypeStats - ) - - // RepoIndexerStatus see models/repo_indexer.go - type RepoIndexerStatus struct { - IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"` - } - - if err := x.Sync2(new(LanguageStat)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - - x.Delete(&RepoIndexerStatus{IndexerType: RepoIndexerTypeStats}) - - // Delete language stat statuses - truncExpr := "TRUNCATE TABLE" - if setting.Database.Type.IsSQLite3() { - truncExpr = "DELETE FROM" - } - - // Delete language stats - if _, err := x.Exec(fmt.Sprintf("%s language_stat", truncExpr)); err != nil { - return err - } - - sess := x.NewSession() - defer sess.Close() - return base.DropTableColumns(sess, "language_stat", "percentage") -} diff --git a/models/migrations/v1_13/v141.go b/models/migrations/v1_13/v141.go index 80796e6db9319..30c1bc07d22fc 100644 --- a/models/migrations/v1_13/v141.go +++ b/models/migrations/v1_13/v141.go @@ -6,16 +6,51 @@ package v1_13 //nolint import ( "fmt" + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func AddKeepActivityPrivateUserColumn(x *xorm.Engine) error { - type User struct { - KeepActivityPrivate bool `xorm:"NOT NULL DEFAULT false"` +func FixLanguageStatsToSaveSize(x *xorm.Engine) error { + // LanguageStat see models/repo_language_stats.go + type LanguageStat struct { + Size int64 `xorm:"NOT NULL DEFAULT 0"` + } + + // RepoIndexerType specifies the repository indexer type + type RepoIndexerType int + + const ( + // RepoIndexerTypeCode code indexer - 0 + RepoIndexerTypeCode RepoIndexerType = iota //nolint:unused + // RepoIndexerTypeStats repository stats indexer - 1 + RepoIndexerTypeStats + ) + + // RepoIndexerStatus see models/repo_indexer.go + type RepoIndexerStatus struct { + IndexerType RepoIndexerType `xorm:"INDEX(s) NOT NULL DEFAULT 0"` } - if err := x.Sync2(new(User)); 
err != nil { + if err := x.Sync2(new(LanguageStat)); err != nil { return fmt.Errorf("Sync2: %w", err) } - return nil + + x.Delete(&RepoIndexerStatus{IndexerType: RepoIndexerTypeStats}) + + // Delete language stat statuses + truncExpr := "TRUNCATE TABLE" + if setting.Database.Type.IsSQLite3() { + truncExpr = "DELETE FROM" + } + + // Delete language stats + if _, err := x.Exec(fmt.Sprintf("%s language_stat", truncExpr)); err != nil { + return err + } + + sess := x.NewSession() + defer sess.Close() + return base.DropTableColumns(sess, "language_stat", "percentage") } diff --git a/models/migrations/v1_13/v142.go b/models/migrations/v1_13/v142.go index 7c7c01ad47d8a..80796e6db9319 100644 --- a/models/migrations/v1_13/v142.go +++ b/models/migrations/v1_13/v142.go @@ -4,21 +4,18 @@ package v1_13 //nolint import ( - "code.gitea.io/gitea/modules/log" + "fmt" - "xorm.io/builder" "xorm.io/xorm" ) -func SetIsArchivedToFalse(x *xorm.Engine) error { - type Repository struct { - IsArchived bool `xorm:"INDEX"` +func AddKeepActivityPrivateUserColumn(x *xorm.Engine) error { + type User struct { + KeepActivityPrivate bool `xorm:"NOT NULL DEFAULT false"` } - count, err := x.Where(builder.IsNull{"is_archived"}).Cols("is_archived").Update(&Repository{ - IsArchived: false, - }) - if err == nil { - log.Debug("Updated %d repositories with is_archived IS NULL", count) + + if err := x.Sync2(new(User)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - return err + return nil } diff --git a/models/migrations/v1_13/v143.go b/models/migrations/v1_13/v143.go index ad1a8c66a5d15..7c7c01ad47d8a 100644 --- a/models/migrations/v1_13/v143.go +++ b/models/migrations/v1_13/v143.go @@ -6,46 +6,19 @@ package v1_13 //nolint import ( "code.gitea.io/gitea/modules/log" + "xorm.io/builder" "xorm.io/xorm" ) -func RecalculateStars(x *xorm.Engine) (err error) { - // because of issue https://github.com/go-gitea/gitea/issues/11949, - // recalculate Stars number for all users to fully fix it. - - type User struct { - ID int64 `xorm:"pk autoincr"` +func SetIsArchivedToFalse(x *xorm.Engine) error { + type Repository struct { + IsArchived bool `xorm:"INDEX"` } - - const batchSize = 100 - sess := x.NewSession() - defer sess.Close() - - for start := 0; ; start += batchSize { - users := make([]User, 0, batchSize) - if err = sess.Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil { - return - } - if len(users) == 0 { - break - } - - if err = sess.Begin(); err != nil { - return - } - - for _, user := range users { - if _, err = sess.Exec("UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) 
WHERE id=?", user.ID, user.ID); err != nil { - return - } - } - - if err = sess.Commit(); err != nil { - return - } + count, err := x.Where(builder.IsNull{"is_archived"}).Cols("is_archived").Update(&Repository{ + IsArchived: false, + }) + if err == nil { + log.Debug("Updated %d repositories with is_archived IS NULL", count) } - - log.Debug("recalculate Stars number for all user finished") - return err } diff --git a/models/migrations/v1_13/v144.go b/models/migrations/v1_13/v144.go index f5a0bc575100f..ad1a8c66a5d15 100644 --- a/models/migrations/v1_13/v144.go +++ b/models/migrations/v1_13/v144.go @@ -6,20 +6,46 @@ package v1_13 //nolint import ( "code.gitea.io/gitea/modules/log" - "xorm.io/builder" "xorm.io/xorm" ) -func UpdateMatrixWebhookHTTPMethod(x *xorm.Engine) error { - matrixHookTaskType := 9 // value comes from the models package - type Webhook struct { - HTTPMethod string +func RecalculateStars(x *xorm.Engine) (err error) { + // because of issue https://github.com/go-gitea/gitea/issues/11949, + // recalculate Stars number for all users to fully fix it. + + type User struct { + ID int64 `xorm:"pk autoincr"` } - cond := builder.Eq{"hook_task_type": matrixHookTaskType}.And(builder.Neq{"http_method": "PUT"}) - count, err := x.Where(cond).Cols("http_method").Update(&Webhook{HTTPMethod: "PUT"}) - if err == nil { - log.Debug("Updated %d Matrix webhooks with http_method 'PUT'", count) + const batchSize = 100 + sess := x.NewSession() + defer sess.Close() + + for start := 0; ; start += batchSize { + users := make([]User, 0, batchSize) + if err = sess.Limit(batchSize, start).Where("type = ?", 0).Cols("id").Find(&users); err != nil { + return + } + if len(users) == 0 { + break + } + + if err = sess.Begin(); err != nil { + return + } + + for _, user := range users { + if _, err = sess.Exec("UPDATE `user` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE uid=?) 
WHERE id=?", user.ID, user.ID); err != nil { + return + } + } + + if err = sess.Commit(); err != nil { + return + } } + + log.Debug("recalculate Stars number for all user finished") + return err } diff --git a/models/migrations/v1_13/v145.go b/models/migrations/v1_13/v145.go index ee40bfc77f862..f5a0bc575100f 100644 --- a/models/migrations/v1_13/v145.go +++ b/models/migrations/v1_13/v145.go @@ -4,78 +4,22 @@ package v1_13 //nolint import ( - "fmt" - - "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/log" + "xorm.io/builder" "xorm.io/xorm" ) -func IncreaseLanguageField(x *xorm.Engine) error { - type LanguageStat struct { - RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - Language string `xorm:"VARCHAR(50) UNIQUE(s) INDEX NOT NULL"` - } - - if err := x.Sync2(new(LanguageStat)); err != nil { - return err - } - - if setting.Database.Type.IsSQLite3() { - // SQLite maps VARCHAR to TEXT without size so we're done - return nil +func UpdateMatrixWebhookHTTPMethod(x *xorm.Engine) error { + matrixHookTaskType := 9 // value comes from the models package + type Webhook struct { + HTTPMethod string } - // need to get the correct type for the new column - inferredTable, err := x.TableInfo(new(LanguageStat)) - if err != nil { - return err + cond := builder.Eq{"hook_task_type": matrixHookTaskType}.And(builder.Neq{"http_method": "PUT"}) + count, err := x.Where(cond).Cols("http_method").Update(&Webhook{HTTPMethod: "PUT"}) + if err == nil { + log.Debug("Updated %d Matrix webhooks with http_method 'PUT'", count) } - column := inferredTable.GetColumn("language") - sqlType := x.Dialect().SQLType(column) - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - switch { - case setting.Database.Type.IsMySQL(): - if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat MODIFY COLUMN language %s", sqlType)); err != nil { - return err - } - case setting.Database.Type.IsMSSQL(): - // Yet again MSSQL just has to be awkward. 
- // Here we have to drop the constraints first and then rebuild them - constraints := make([]string, 0) - if err := sess.SQL(`SELECT i.name AS Name - FROM sys.indexes i INNER JOIN sys.index_columns ic - ON i.index_id = ic.index_id AND i.object_id = ic.object_id - INNER JOIN sys.tables AS t - ON t.object_id = i.object_id - INNER JOIN sys.columns c - ON t.object_id = c.object_id AND ic.column_id = c.column_id - WHERE t.name = 'language_stat' AND c.name = 'language'`).Find(&constraints); err != nil { - return fmt.Errorf("Find constraints: %w", err) - } - for _, constraint := range constraints { - if _, err := sess.Exec(fmt.Sprintf("DROP INDEX [%s] ON `language_stat`", constraint)); err != nil { - return fmt.Errorf("Drop table `language_stat` constraint `%s`: %w", constraint, err) - } - } - if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat ALTER COLUMN language %s", sqlType)); err != nil { - return err - } - // Finally restore the constraint - if err := sess.CreateUniques(new(LanguageStat)); err != nil { - return err - } - case setting.Database.Type.IsPostgreSQL(): - if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat ALTER COLUMN language TYPE %s", sqlType)); err != nil { - return err - } - } - - return sess.Commit() + return err } diff --git a/models/migrations/v1_13/v146.go b/models/migrations/v1_13/v146.go index 5db8b0a4374ac..ee40bfc77f862 100644 --- a/models/migrations/v1_13/v146.go +++ b/models/migrations/v1_13/v146.go @@ -4,80 +4,78 @@ package v1_13 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" + "fmt" + + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func AddProjectsInfo(x *xorm.Engine) error { - // Create new tables - type ( - ProjectType uint8 - ProjectBoardType uint8 - ) - - type Project struct { - ID int64 `xorm:"pk autoincr"` - Title string `xorm:"INDEX NOT NULL"` - Description string `xorm:"TEXT"` - RepoID int64 `xorm:"INDEX"` - CreatorID int64 `xorm:"NOT NULL"` - IsClosed bool `xorm:"INDEX"` - - BoardType ProjectBoardType - Type ProjectType - - ClosedDateUnix timeutil.TimeStamp - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +func IncreaseLanguageField(x *xorm.Engine) error { + type LanguageStat struct { + RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + Language string `xorm:"VARCHAR(50) UNIQUE(s) INDEX NOT NULL"` } - if err := x.Sync2(new(Project)); err != nil { + if err := x.Sync2(new(LanguageStat)); err != nil { return err } - type Comment struct { - OldProjectID int64 - ProjectID int64 + if setting.Database.Type.IsSQLite3() { + // SQLite maps VARCHAR to TEXT without size so we're done + return nil } - if err := x.Sync2(new(Comment)); err != nil { + // need to get the correct type for the new column + inferredTable, err := x.TableInfo(new(LanguageStat)) + if err != nil { return err } + column := inferredTable.GetColumn("language") + sqlType := x.Dialect().SQLType(column) - type Repository struct { - ID int64 - NumProjects int `xorm:"NOT NULL DEFAULT 0"` - NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"` - } - - if err := x.Sync2(new(Repository)); err != nil { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { return err } - // ProjectIssue saves relation from issue to a project - type ProjectIssue struct { - ID int64 `xorm:"pk autoincr"` - IssueID int64 `xorm:"INDEX"` - ProjectID int64 `xorm:"INDEX"` - ProjectBoardID int64 `xorm:"INDEX"` - } - - if err := x.Sync2(new(ProjectIssue)); err != nil { - return err - } - - type 
ProjectBoard struct { - ID int64 `xorm:"pk autoincr"` - Title string - Default bool `xorm:"NOT NULL DEFAULT false"` - - ProjectID int64 `xorm:"INDEX NOT NULL"` - CreatorID int64 `xorm:"NOT NULL"` - - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + switch { + case setting.Database.Type.IsMySQL(): + if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat MODIFY COLUMN language %s", sqlType)); err != nil { + return err + } + case setting.Database.Type.IsMSSQL(): + // Yet again MSSQL just has to be awkward. + // Here we have to drop the constraints first and then rebuild them + constraints := make([]string, 0) + if err := sess.SQL(`SELECT i.name AS Name + FROM sys.indexes i INNER JOIN sys.index_columns ic + ON i.index_id = ic.index_id AND i.object_id = ic.object_id + INNER JOIN sys.tables AS t + ON t.object_id = i.object_id + INNER JOIN sys.columns c + ON t.object_id = c.object_id AND ic.column_id = c.column_id + WHERE t.name = 'language_stat' AND c.name = 'language'`).Find(&constraints); err != nil { + return fmt.Errorf("Find constraints: %w", err) + } + for _, constraint := range constraints { + if _, err := sess.Exec(fmt.Sprintf("DROP INDEX [%s] ON `language_stat`", constraint)); err != nil { + return fmt.Errorf("Drop table `language_stat` constraint `%s`: %w", constraint, err) + } + } + if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat ALTER COLUMN language %s", sqlType)); err != nil { + return err + } + // Finally restore the constraint + if err := sess.CreateUniques(new(LanguageStat)); err != nil { + return err + } + case setting.Database.Type.IsPostgreSQL(): + if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE language_stat ALTER COLUMN language TYPE %s", sqlType)); err != nil { + return err + } } - return x.Sync2(new(ProjectBoard)) + return sess.Commit() } diff --git a/models/migrations/v1_13/v147.go b/models/migrations/v1_13/v147.go index d9c51145c4183..5db8b0a4374ac 100644 --- a/models/migrations/v1_13/v147.go +++ b/models/migrations/v1_13/v147.go @@ -9,145 +9,75 @@ import ( "xorm.io/xorm" ) -func CreateReviewsForCodeComments(x *xorm.Engine) error { - // Review - type Review struct { - ID int64 `xorm:"pk autoincr"` - Type int - ReviewerID int64 `xorm:"index"` - OriginalAuthor string - OriginalAuthorID int64 - IssueID int64 `xorm:"index"` - Content string `xorm:"TEXT"` - // Official is a review made by an assigned approver (counts towards approval) - Official bool `xorm:"NOT NULL DEFAULT false"` - CommitID string `xorm:"VARCHAR(40)"` - Stale bool `xorm:"NOT NULL DEFAULT false"` - - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +func AddProjectsInfo(x *xorm.Engine) error { + // Create new tables + type ( + ProjectType uint8 + ProjectBoardType uint8 + ) + + type Project struct { + ID int64 `xorm:"pk autoincr"` + Title string `xorm:"INDEX NOT NULL"` + Description string `xorm:"TEXT"` + RepoID int64 `xorm:"INDEX"` + CreatorID int64 `xorm:"NOT NULL"` + IsClosed bool `xorm:"INDEX"` + + BoardType ProjectBoardType + Type ProjectType + + ClosedDateUnix timeutil.TimeStamp + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - const ReviewTypeComment = 2 + if err := x.Sync2(new(Project)); err != nil { + return err + } - // Comment represents a comment in commit and issue page. 
type Comment struct { - ID int64 `xorm:"pk autoincr"` - Type int `xorm:"INDEX"` - PosterID int64 `xorm:"INDEX"` - OriginalAuthor string - OriginalAuthorID int64 - IssueID int64 `xorm:"INDEX"` - LabelID int64 - OldProjectID int64 - ProjectID int64 - OldMilestoneID int64 - MilestoneID int64 - AssigneeID int64 - RemovedAssignee bool - ResolveDoerID int64 - OldTitle string - NewTitle string - OldRef string - NewRef string - DependentIssueID int64 - - CommitID int64 - Line int64 // - previous line / + proposed line - TreePath string - Content string `xorm:"TEXT"` - - // Path represents the 4 lines of code cemented by this comment - PatchQuoted string `xorm:"TEXT patch"` - - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - - // Reference issue in commit message - CommitSHA string `xorm:"VARCHAR(40)"` + OldProjectID int64 + ProjectID int64 + } - ReviewID int64 `xorm:"index"` - Invalidated bool + if err := x.Sync2(new(Comment)); err != nil { + return err + } - // Reference an issue or pull from another comment, issue or PR - // All information is about the origin of the reference - RefRepoID int64 `xorm:"index"` // Repo where the referencing - RefIssueID int64 `xorm:"index"` - RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's) - RefAction int `xorm:"SMALLINT"` // What happens if RefIssueID resolves - RefIsPull bool + type Repository struct { + ID int64 + NumProjects int `xorm:"NOT NULL DEFAULT 0"` + NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"` } - if err := x.Sync2(new(Review), new(Comment)); err != nil { + if err := x.Sync2(new(Repository)); err != nil { return err } - updateComment := func(comments []*Comment) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - for _, comment := range comments { - review := &Review{ - Type: ReviewTypeComment, - ReviewerID: comment.PosterID, - IssueID: comment.IssueID, - Official: false, - CommitID: comment.CommitSHA, - Stale: comment.Invalidated, - OriginalAuthor: comment.OriginalAuthor, - OriginalAuthorID: comment.OriginalAuthorID, - CreatedUnix: comment.CreatedUnix, - UpdatedUnix: comment.CreatedUnix, - } - if _, err := sess.NoAutoTime().Insert(review); err != nil { - return err - } - - reviewComment := &Comment{ - Type: 22, - PosterID: comment.PosterID, - Content: "", - IssueID: comment.IssueID, - ReviewID: review.ID, - OriginalAuthor: comment.OriginalAuthor, - OriginalAuthorID: comment.OriginalAuthorID, - CreatedUnix: comment.CreatedUnix, - UpdatedUnix: comment.CreatedUnix, - } - if _, err := sess.NoAutoTime().Insert(reviewComment); err != nil { - return err - } - - comment.ReviewID = review.ID - if _, err := sess.ID(comment.ID).Cols("review_id").NoAutoTime().Update(comment); err != nil { - return err - } - } - - return sess.Commit() + // ProjectIssue saves relation from issue to a project + type ProjectIssue struct { + ID int64 `xorm:"pk autoincr"` + IssueID int64 `xorm:"INDEX"` + ProjectID int64 `xorm:"INDEX"` + ProjectBoardID int64 `xorm:"INDEX"` } - start := 0 - batchSize := 100 - for { - comments := make([]*Comment, 0, batchSize) - if err := x.Where("review_id = 0 and type = 21").Limit(batchSize, start).Find(&comments); err != nil { - return err - } + if err := x.Sync2(new(ProjectIssue)); err != nil { + return err + } - if err := updateComment(comments); err != nil { - return err - } + type ProjectBoard struct { + ID int64 `xorm:"pk autoincr"` + Title string + Default bool `xorm:"NOT NULL 
DEFAULT false"` - start += len(comments) + ProjectID int64 `xorm:"INDEX NOT NULL"` + CreatorID int64 `xorm:"NOT NULL"` - if len(comments) < batchSize { - break - } + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - return nil + return x.Sync2(new(ProjectBoard)) } diff --git a/models/migrations/v1_13/v148.go b/models/migrations/v1_13/v148.go index 7bb8ab700b6a5..d9c51145c4183 100644 --- a/models/migrations/v1_13/v148.go +++ b/models/migrations/v1_13/v148.go @@ -4,10 +4,150 @@ package v1_13 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func PurgeInvalidDependenciesComments(x *xorm.Engine) error { - _, err := x.Exec("DELETE FROM comment WHERE dependent_issue_id != 0 AND dependent_issue_id NOT IN (SELECT id FROM issue)") - return err +func CreateReviewsForCodeComments(x *xorm.Engine) error { + // Review + type Review struct { + ID int64 `xorm:"pk autoincr"` + Type int + ReviewerID int64 `xorm:"index"` + OriginalAuthor string + OriginalAuthorID int64 + IssueID int64 `xorm:"index"` + Content string `xorm:"TEXT"` + // Official is a review made by an assigned approver (counts towards approval) + Official bool `xorm:"NOT NULL DEFAULT false"` + CommitID string `xorm:"VARCHAR(40)"` + Stale bool `xorm:"NOT NULL DEFAULT false"` + + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + + const ReviewTypeComment = 2 + + // Comment represents a comment in commit and issue page. + type Comment struct { + ID int64 `xorm:"pk autoincr"` + Type int `xorm:"INDEX"` + PosterID int64 `xorm:"INDEX"` + OriginalAuthor string + OriginalAuthorID int64 + IssueID int64 `xorm:"INDEX"` + LabelID int64 + OldProjectID int64 + ProjectID int64 + OldMilestoneID int64 + MilestoneID int64 + AssigneeID int64 + RemovedAssignee bool + ResolveDoerID int64 + OldTitle string + NewTitle string + OldRef string + NewRef string + DependentIssueID int64 + + CommitID int64 + Line int64 // - previous line / + proposed line + TreePath string + Content string `xorm:"TEXT"` + + // Path represents the 4 lines of code cemented by this comment + PatchQuoted string `xorm:"TEXT patch"` + + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + + // Reference issue in commit message + CommitSHA string `xorm:"VARCHAR(40)"` + + ReviewID int64 `xorm:"index"` + Invalidated bool + + // Reference an issue or pull from another comment, issue or PR + // All information is about the origin of the reference + RefRepoID int64 `xorm:"index"` // Repo where the referencing + RefIssueID int64 `xorm:"index"` + RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's) + RefAction int `xorm:"SMALLINT"` // What happens if RefIssueID resolves + RefIsPull bool + } + + if err := x.Sync2(new(Review), new(Comment)); err != nil { + return err + } + + updateComment := func(comments []*Comment) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + + for _, comment := range comments { + review := &Review{ + Type: ReviewTypeComment, + ReviewerID: comment.PosterID, + IssueID: comment.IssueID, + Official: false, + CommitID: comment.CommitSHA, + Stale: comment.Invalidated, + OriginalAuthor: comment.OriginalAuthor, + OriginalAuthorID: comment.OriginalAuthorID, + CreatedUnix: comment.CreatedUnix, + UpdatedUnix: comment.CreatedUnix, + } + if _, err := sess.NoAutoTime().Insert(review); err 
!= nil { + return err + } + + reviewComment := &Comment{ + Type: 22, + PosterID: comment.PosterID, + Content: "", + IssueID: comment.IssueID, + ReviewID: review.ID, + OriginalAuthor: comment.OriginalAuthor, + OriginalAuthorID: comment.OriginalAuthorID, + CreatedUnix: comment.CreatedUnix, + UpdatedUnix: comment.CreatedUnix, + } + if _, err := sess.NoAutoTime().Insert(reviewComment); err != nil { + return err + } + + comment.ReviewID = review.ID + if _, err := sess.ID(comment.ID).Cols("review_id").NoAutoTime().Update(comment); err != nil { + return err + } + } + + return sess.Commit() + } + + start := 0 + batchSize := 100 + for { + comments := make([]*Comment, 0, batchSize) + if err := x.Where("review_id = 0 and type = 21").Limit(batchSize, start).Find(&comments); err != nil { + return err + } + + if err := updateComment(comments); err != nil { + return err + } + + start += len(comments) + + if len(comments) < batchSize { + break + } + } + + return nil } diff --git a/models/migrations/v1_13/v149.go b/models/migrations/v1_13/v149.go index e093b4f9db8d6..7bb8ab700b6a5 100644 --- a/models/migrations/v1_13/v149.go +++ b/models/migrations/v1_13/v149.go @@ -4,21 +4,10 @@ package v1_13 //nolint import ( - "fmt" - - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddCreatedAndUpdatedToMilestones(x *xorm.Engine) error { - type Milestone struct { - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - - if err := x.Sync2(new(Milestone)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return nil +func PurgeInvalidDependenciesComments(x *xorm.Engine) error { + _, err := x.Exec("DELETE FROM comment WHERE dependent_issue_id != 0 AND dependent_issue_id NOT IN (SELECT id FROM issue)") + return err } diff --git a/models/migrations/v1_13/v150.go b/models/migrations/v1_13/v150.go index d5ba489566545..e093b4f9db8d6 100644 --- a/models/migrations/v1_13/v150.go +++ b/models/migrations/v1_13/v150.go @@ -4,36 +4,21 @@ package v1_13 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" + "fmt" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func AddPrimaryKeyToRepoTopic(x *xorm.Engine) error { - // Topic represents a topic of repositories - type Topic struct { - ID int64 `xorm:"pk autoincr"` - Name string `xorm:"UNIQUE VARCHAR(25)"` - RepoCount int +func AddCreatedAndUpdatedToMilestones(x *xorm.Engine) error { + type Milestone struct { CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - // RepoTopic represents associated repositories and topics - type RepoTopic struct { - RepoID int64 `xorm:"pk"` - TopicID int64 `xorm:"pk"` - } - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err + if err := x.Sync2(new(Milestone)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - - base.RecreateTable(sess, &Topic{}) - base.RecreateTable(sess, &RepoTopic{}) - - return sess.Commit() + return nil } diff --git a/models/migrations/v1_13/v151.go b/models/migrations/v1_13/v151.go index 9aa71ec29f13f..d5ba489566545 100644 --- a/models/migrations/v1_13/v151.go +++ b/models/migrations/v1_13/v151.go @@ -4,95 +4,26 @@ package v1_13 //nolint import ( - "context" - "fmt" - "strings" - - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -func SetDefaultPasswordToArgon2(x 
*xorm.Engine) error { - switch { - case setting.Database.Type.IsMySQL(): - _, err := x.Exec("ALTER TABLE `user` ALTER passwd_hash_algo SET DEFAULT 'argon2';") - return err - case setting.Database.Type.IsPostgreSQL(): - _, err := x.Exec("ALTER TABLE `user` ALTER COLUMN passwd_hash_algo SET DEFAULT 'argon2';") - return err - case setting.Database.Type.IsMSSQL(): - // need to find the constraint and drop it, then recreate it. - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - res, err := sess.QueryString("SELECT [name] FROM sys.default_constraints WHERE parent_object_id=OBJECT_ID(?) AND COL_NAME(parent_object_id, parent_column_id)=?;", "user", "passwd_hash_algo") - if err != nil { - return err - } - if len(res) > 0 { - constraintName := res[0]["name"] - log.Error("Results of select constraint: %s", constraintName) - _, err := sess.Exec("ALTER TABLE [user] DROP CONSTRAINT " + constraintName) - if err != nil { - return err - } - _, err = sess.Exec("ALTER TABLE [user] ADD CONSTRAINT " + constraintName + " DEFAULT 'argon2' FOR passwd_hash_algo") - if err != nil { - return err - } - } else { - _, err := sess.Exec("ALTER TABLE [user] ADD DEFAULT('argon2') FOR passwd_hash_algo") - if err != nil { - return err - } - } - return sess.Commit() - - case setting.Database.Type.IsSQLite3(): - // drop through - default: - log.Fatal("Unrecognized DB") - } - - tables, err := x.DBMetas() - if err != nil { - return err +func AddPrimaryKeyToRepoTopic(x *xorm.Engine) error { + // Topic represents a topic of repositories + type Topic struct { + ID int64 `xorm:"pk autoincr"` + Name string `xorm:"UNIQUE VARCHAR(25)"` + RepoCount int + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - // Now for SQLite we have to recreate the table - var table *schemas.Table - tableName := "user" - - for _, table = range tables { - if table.Name == tableName { - break - } - } - if table == nil || table.Name != tableName { - type User struct { - PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"` - } - return x.Sync2(new(User)) - } - column := table.GetColumn("passwd_hash_algo") - if column == nil { - type User struct { - PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"` - } - return x.Sync2(new(User)) - } - - tempTableName := "tmp_recreate__user" - column.Default = "'argon2'" - - createTableSQL, _, err := x.Dialect().CreateTableSQL(context.Background(), x.DB(), table, tempTableName) - if err != nil { - return err + // RepoTopic represents associated repositories and topics + type RepoTopic struct { + RepoID int64 `xorm:"pk"` + TopicID int64 `xorm:"pk"` } sess := x.NewSession() @@ -100,97 +31,9 @@ func SetDefaultPasswordToArgon2(x *xorm.Engine) error { if err := sess.Begin(); err != nil { return err } - if _, err := sess.Exec(createTableSQL); err != nil { - log.Error("Unable to create table %s. Error: %v\n", tempTableName, err, createTableSQL) - return err - } - for _, index := range table.Indexes { - if _, err := sess.Exec(x.Dialect().CreateIndexSQL(tempTableName, index)); err != nil { - log.Error("Unable to create indexes on temporary table %s. 
Error: %v", tempTableName, err) - return err - } - } - newTableColumns := table.Columns() - if len(newTableColumns) == 0 { - return fmt.Errorf("no columns in new table") - } - hasID := false - for _, column := range newTableColumns { - hasID = hasID || (column.IsPrimaryKey && column.IsAutoIncrement) - } - - sqlStringBuilder := &strings.Builder{} - _, _ = sqlStringBuilder.WriteString("INSERT INTO `") - _, _ = sqlStringBuilder.WriteString(tempTableName) - _, _ = sqlStringBuilder.WriteString("` (`") - _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name) - _, _ = sqlStringBuilder.WriteString("`") - for _, column := range newTableColumns[1:] { - _, _ = sqlStringBuilder.WriteString(", `") - _, _ = sqlStringBuilder.WriteString(column.Name) - _, _ = sqlStringBuilder.WriteString("`") - } - _, _ = sqlStringBuilder.WriteString(")") - _, _ = sqlStringBuilder.WriteString(" SELECT ") - if newTableColumns[0].Default != "" { - _, _ = sqlStringBuilder.WriteString("COALESCE(`") - _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name) - _, _ = sqlStringBuilder.WriteString("`, ") - _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Default) - _, _ = sqlStringBuilder.WriteString(")") - } else { - _, _ = sqlStringBuilder.WriteString("`") - _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name) - _, _ = sqlStringBuilder.WriteString("`") - } - - for _, column := range newTableColumns[1:] { - if column.Default != "" { - _, _ = sqlStringBuilder.WriteString(", COALESCE(`") - _, _ = sqlStringBuilder.WriteString(column.Name) - _, _ = sqlStringBuilder.WriteString("`, ") - _, _ = sqlStringBuilder.WriteString(column.Default) - _, _ = sqlStringBuilder.WriteString(")") - } else { - _, _ = sqlStringBuilder.WriteString(", `") - _, _ = sqlStringBuilder.WriteString(column.Name) - _, _ = sqlStringBuilder.WriteString("`") - } - } - _, _ = sqlStringBuilder.WriteString(" FROM `") - _, _ = sqlStringBuilder.WriteString(tableName) - _, _ = sqlStringBuilder.WriteString("`") - - if _, err := sess.Exec(sqlStringBuilder.String()); err != nil { - log.Error("Unable to set copy data in to temp table %s. Error: %v", tempTableName, err) - return err - } - - // SQLite will drop all the constraints on the old table - if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil { - log.Error("Unable to drop old table %s. Error: %v", tableName, err) - return err - } - - for _, index := range table.Indexes { - if _, err := sess.Exec(x.Dialect().DropIndexSQL(tempTableName, index)); err != nil { - log.Error("Unable to drop indexes on temporary table %s. Error: %v", tempTableName, err) - return err - } - } - - if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil { - log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err) - return err - } - - for _, index := range table.Indexes { - if _, err := sess.Exec(x.Dialect().CreateIndexSQL(tableName, index)); err != nil { - log.Error("Unable to recreate indexes on table %s. 
Error: %v", tableName, err) - return err - } - } + base.RecreateTable(sess, &Topic{}) + base.RecreateTable(sess, &RepoTopic{}) return sess.Commit() } diff --git a/models/migrations/v1_13/v152.go b/models/migrations/v1_13/v152.go index 7f7c414de8a97..9aa71ec29f13f 100644 --- a/models/migrations/v1_13/v152.go +++ b/models/migrations/v1_13/v152.go @@ -3,11 +3,194 @@ package v1_13 //nolint -import "xorm.io/xorm" +import ( + "context" + "fmt" + "strings" -func AddTrustModelToRepository(x *xorm.Engine) error { - type Repository struct { - TrustModel int + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + + "xorm.io/xorm" + "xorm.io/xorm/schemas" +) + +func SetDefaultPasswordToArgon2(x *xorm.Engine) error { + switch { + case setting.Database.Type.IsMySQL(): + _, err := x.Exec("ALTER TABLE `user` ALTER passwd_hash_algo SET DEFAULT 'argon2';") + return err + case setting.Database.Type.IsPostgreSQL(): + _, err := x.Exec("ALTER TABLE `user` ALTER COLUMN passwd_hash_algo SET DEFAULT 'argon2';") + return err + case setting.Database.Type.IsMSSQL(): + // need to find the constraint and drop it, then recreate it. + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + res, err := sess.QueryString("SELECT [name] FROM sys.default_constraints WHERE parent_object_id=OBJECT_ID(?) AND COL_NAME(parent_object_id, parent_column_id)=?;", "user", "passwd_hash_algo") + if err != nil { + return err + } + if len(res) > 0 { + constraintName := res[0]["name"] + log.Error("Results of select constraint: %s", constraintName) + _, err := sess.Exec("ALTER TABLE [user] DROP CONSTRAINT " + constraintName) + if err != nil { + return err + } + _, err = sess.Exec("ALTER TABLE [user] ADD CONSTRAINT " + constraintName + " DEFAULT 'argon2' FOR passwd_hash_algo") + if err != nil { + return err + } + } else { + _, err := sess.Exec("ALTER TABLE [user] ADD DEFAULT('argon2') FOR passwd_hash_algo") + if err != nil { + return err + } + } + return sess.Commit() + + case setting.Database.Type.IsSQLite3(): + // drop through + default: + log.Fatal("Unrecognized DB") + } + + tables, err := x.DBMetas() + if err != nil { + return err + } + + // Now for SQLite we have to recreate the table + var table *schemas.Table + tableName := "user" + + for _, table = range tables { + if table.Name == tableName { + break + } + } + if table == nil || table.Name != tableName { + type User struct { + PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"` + } + return x.Sync2(new(User)) + } + column := table.GetColumn("passwd_hash_algo") + if column == nil { + type User struct { + PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"` + } + return x.Sync2(new(User)) + } + + tempTableName := "tmp_recreate__user" + column.Default = "'argon2'" + + createTableSQL, _, err := x.Dialect().CreateTableSQL(context.Background(), x.DB(), table, tempTableName) + if err != nil { + return err + } + + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + if _, err := sess.Exec(createTableSQL); err != nil { + log.Error("Unable to create table %s. Error: %v\n", tempTableName, err, createTableSQL) + return err + } + for _, index := range table.Indexes { + if _, err := sess.Exec(x.Dialect().CreateIndexSQL(tempTableName, index)); err != nil { + log.Error("Unable to create indexes on temporary table %s. 
Error: %v", tempTableName, err) + return err + } + } + + newTableColumns := table.Columns() + if len(newTableColumns) == 0 { + return fmt.Errorf("no columns in new table") + } + hasID := false + for _, column := range newTableColumns { + hasID = hasID || (column.IsPrimaryKey && column.IsAutoIncrement) + } + + sqlStringBuilder := &strings.Builder{} + _, _ = sqlStringBuilder.WriteString("INSERT INTO `") + _, _ = sqlStringBuilder.WriteString(tempTableName) + _, _ = sqlStringBuilder.WriteString("` (`") + _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name) + _, _ = sqlStringBuilder.WriteString("`") + for _, column := range newTableColumns[1:] { + _, _ = sqlStringBuilder.WriteString(", `") + _, _ = sqlStringBuilder.WriteString(column.Name) + _, _ = sqlStringBuilder.WriteString("`") + } + _, _ = sqlStringBuilder.WriteString(")") + _, _ = sqlStringBuilder.WriteString(" SELECT ") + if newTableColumns[0].Default != "" { + _, _ = sqlStringBuilder.WriteString("COALESCE(`") + _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name) + _, _ = sqlStringBuilder.WriteString("`, ") + _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Default) + _, _ = sqlStringBuilder.WriteString(")") + } else { + _, _ = sqlStringBuilder.WriteString("`") + _, _ = sqlStringBuilder.WriteString(newTableColumns[0].Name) + _, _ = sqlStringBuilder.WriteString("`") + } + + for _, column := range newTableColumns[1:] { + if column.Default != "" { + _, _ = sqlStringBuilder.WriteString(", COALESCE(`") + _, _ = sqlStringBuilder.WriteString(column.Name) + _, _ = sqlStringBuilder.WriteString("`, ") + _, _ = sqlStringBuilder.WriteString(column.Default) + _, _ = sqlStringBuilder.WriteString(")") + } else { + _, _ = sqlStringBuilder.WriteString(", `") + _, _ = sqlStringBuilder.WriteString(column.Name) + _, _ = sqlStringBuilder.WriteString("`") + } + } + _, _ = sqlStringBuilder.WriteString(" FROM `") + _, _ = sqlStringBuilder.WriteString(tableName) + _, _ = sqlStringBuilder.WriteString("`") + + if _, err := sess.Exec(sqlStringBuilder.String()); err != nil { + log.Error("Unable to set copy data in to temp table %s. Error: %v", tempTableName, err) + return err } - return x.Sync2(new(Repository)) + + // SQLite will drop all the constraints on the old table + if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil { + log.Error("Unable to drop old table %s. Error: %v", tableName, err) + return err + } + + for _, index := range table.Indexes { + if _, err := sess.Exec(x.Dialect().DropIndexSQL(tempTableName, index)); err != nil { + log.Error("Unable to drop indexes on temporary table %s. Error: %v", tempTableName, err) + return err + } + } + + if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil { + log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err) + return err + } + + for _, index := range table.Indexes { + if _, err := sess.Exec(x.Dialect().CreateIndexSQL(tableName, index)); err != nil { + log.Error("Unable to recreate indexes on table %s. 
Error: %v", tableName, err) + return err + } + } + + return sess.Commit() } diff --git a/models/migrations/v1_13/v153.go b/models/migrations/v1_13/v153.go index 4146d83387845..7f7c414de8a97 100644 --- a/models/migrations/v1_13/v153.go +++ b/models/migrations/v1_13/v153.go @@ -3,22 +3,11 @@ package v1_13 //nolint -import ( - "xorm.io/xorm" -) +import "xorm.io/xorm" -func AddTeamReviewRequestSupport(x *xorm.Engine) error { - type Review struct { - ReviewerTeamID int64 `xorm:"NOT NULL DEFAULT 0"` +func AddTrustModelToRepository(x *xorm.Engine) error { + type Repository struct { + TrustModel int } - - type Comment struct { - AssigneeTeamID int64 `xorm:"NOT NULL DEFAULT 0"` - } - - if err := x.Sync2(new(Review)); err != nil { - return err - } - - return x.Sync2(new(Comment)) + return x.Sync2(new(Repository)) } diff --git a/models/migrations/v1_13/v154.go b/models/migrations/v1_13/v154.go index 8b4498be847ad..4146d83387845 100644 --- a/models/migrations/v1_13/v154.go +++ b/models/migrations/v1_13/v154.go @@ -4,52 +4,21 @@ package v1_13 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddTimeStamps(x *xorm.Engine) error { - // this will add timestamps where it is useful to have - - // Star represents a starred repo by an user. - type Star struct { - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - } - if err := x.Sync2(new(Star)); err != nil { - return err - } - - // Label represents a label of repository for issues. - type Label struct { - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - if err := x.Sync2(new(Label)); err != nil { - return err +func AddTeamReviewRequestSupport(x *xorm.Engine) error { + type Review struct { + ReviewerTeamID int64 `xorm:"NOT NULL DEFAULT 0"` } - // Follow represents relations of user and their followers. - type Follow struct { - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - } - if err := x.Sync2(new(Follow)); err != nil { - return err + type Comment struct { + AssigneeTeamID int64 `xorm:"NOT NULL DEFAULT 0"` } - // Watch is connection request for receiving repository notification. - type Watch struct { - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - if err := x.Sync2(new(Watch)); err != nil { + if err := x.Sync2(new(Review)); err != nil { return err } - // Collaboration represent the relation between an individual and a repository. - type Collaboration struct { - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - return x.Sync2(new(Collaboration)) + return x.Sync2(new(Comment)) } diff --git a/models/migrations/v1_13/v155.go b/models/migrations/v1_13/v155.go new file mode 100644 index 0000000000000..8b4498be847ad --- /dev/null +++ b/models/migrations/v1_13/v155.go @@ -0,0 +1,55 @@ +// Copyright 2020 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_13 //nolint + +import ( + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +func AddTimeStamps(x *xorm.Engine) error { + // this will add timestamps where it is useful to have + + // Star represents a starred repo by an user. + type Star struct { + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + } + if err := x.Sync2(new(Star)); err != nil { + return err + } + + // Label represents a label of repository for issues. 
+ type Label struct { + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + if err := x.Sync2(new(Label)); err != nil { + return err + } + + // Follow represents relations of user and their followers. + type Follow struct { + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + } + if err := x.Sync2(new(Follow)); err != nil { + return err + } + + // Watch is connection request for receiving repository notification. + type Watch struct { + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + if err := x.Sync2(new(Watch)); err != nil { + return err + } + + // Collaboration represent the relation between an individual and a repository. + type Collaboration struct { + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + return x.Sync2(new(Collaboration)) +} diff --git a/models/migrations/v1_14/v155.go b/models/migrations/v1_14/v155.go deleted file mode 100644 index 7c5dc5fbe278d..0000000000000 --- a/models/migrations/v1_14/v155.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_14 //nolint - -import ( - "fmt" - - "xorm.io/xorm" -) - -func AddChangedProtectedFilesPullRequestColumn(x *xorm.Engine) error { - type PullRequest struct { - ChangedProtectedFiles []string `xorm:"TEXT JSON"` - } - - if err := x.Sync2(new(PullRequest)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return nil -} diff --git a/models/migrations/v1_14/v156.go b/models/migrations/v1_14/v156.go index 2cf4954a15f58..7c5dc5fbe278d 100644 --- a/models/migrations/v1_14/v156.go +++ b/models/migrations/v1_14/v156.go @@ -5,173 +5,17 @@ package v1_14 //nolint import ( "fmt" - "path/filepath" - "strings" - - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -// Copy paste from models/repo.go because we cannot import models package -func repoPath(userName, repoName string) string { - return filepath.Join(userPath(userName), strings.ToLower(repoName)+".git") -} - -func userPath(userName string) string { - return filepath.Join(setting.RepoRootPath, strings.ToLower(userName)) -} - -func FixPublisherIDforTagReleases(x *xorm.Engine) error { - type Release struct { - ID int64 - RepoID int64 - Sha1 string - TagName string - PublisherID int64 - } - - type Repository struct { - ID int64 - OwnerID int64 - OwnerName string - Name string +func AddChangedProtectedFilesPullRequestColumn(x *xorm.Engine) error { + type PullRequest struct { + ChangedProtectedFiles []string `xorm:"TEXT JSON"` } - type User struct { - ID int64 - Name string - Email string + if err := x.Sync2(new(PullRequest)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - - const batchSize = 100 - sess := x.NewSession() - defer sess.Close() - - var ( - repo *Repository - gitRepo *git.Repository - user *User - ) - defer func() { - if gitRepo != nil { - gitRepo.Close() - } - }() - for start := 0; ; start += batchSize { - releases := make([]*Release, 0, batchSize) - - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Limit(batchSize, start). - Where("publisher_id = 0 OR publisher_id is null"). - Asc("repo_id", "id").Where("is_tag=?", true). 
- Find(&releases); err != nil { - return err - } - - if len(releases) == 0 { - break - } - - for _, release := range releases { - if repo == nil || repo.ID != release.RepoID { - if gitRepo != nil { - gitRepo.Close() - gitRepo = nil - } - repo = new(Repository) - has, err := sess.ID(release.RepoID).Get(repo) - if err != nil { - log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err) - return err - } else if !has { - log.Warn("Release[%d] is orphaned and refers to non-existing repository %d", release.ID, release.RepoID) - log.Warn("This release should be deleted") - continue - } - - if repo.OwnerName == "" { - // v120.go migration may not have been run correctly - we'll just replicate it here - // because this appears to be a common-ish problem. - if _, err := sess.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)"); err != nil { - log.Error("Error whilst updating repository[%d] owner name", repo.ID) - return err - } - - if _, err := sess.ID(release.RepoID).Get(repo); err != nil { - log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err) - return err - } - } - gitRepo, err = git.OpenRepository(git.DefaultContext, repoPath(repo.OwnerName, repo.Name)) - if err != nil { - log.Error("Error whilst opening git repo for [%d]%s/%s. Error: %v", repo.ID, repo.OwnerName, repo.Name, err) - return err - } - } - - commit, err := gitRepo.GetTagCommit(release.TagName) - if err != nil { - if git.IsErrNotExist(err) { - log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name) - continue - } - log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err) - return fmt.Errorf("GetTagCommit: %w", err) - } - - if commit.Author.Email == "" { - log.Warn("Tag: %s in Repo[%d]%s/%s does not have a tagger.", release.TagName, repo.ID, repo.OwnerName, repo.Name) - commit, err = gitRepo.GetCommit(commit.ID.String()) - if err != nil { - if git.IsErrNotExist(err) { - log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name) - continue - } - log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err) - return fmt.Errorf("GetCommit: %w", err) - } - } - - if commit.Author.Email == "" { - log.Warn("Tag: %s in Repo[%d]%s/%s does not have a Tagger and its underlying commit does not have an Author either!", release.TagName, repo.ID, repo.OwnerName, repo.Name) - continue - } - - if user == nil || !strings.EqualFold(user.Email, commit.Author.Email) { - user = new(User) - _, err = sess.Where("email=?", commit.Author.Email).Get(user) - if err != nil { - log.Error("Error whilst getting commit author by email: %s for Tag: %s in [%d]%s/%s. Error: %v", commit.Author.Email, release.TagName, repo.ID, repo.OwnerName, repo.Name, err) - return err - } - - user.Email = commit.Author.Email - } - - if user.ID <= 0 { - continue - } - - release.PublisherID = user.ID - if _, err := sess.ID(release.ID).Cols("publisher_id").Update(release); err != nil { - log.Error("Error whilst updating publisher[%d] for release[%d] with tag name %s. 
Error: %v", release.PublisherID, release.ID, release.TagName, err) - return err - } - } - if gitRepo != nil { - gitRepo.Close() - } - - if err := sess.Commit(); err != nil { - return err - } - } - return nil } diff --git a/models/migrations/v1_14/v157.go b/models/migrations/v1_14/v157.go index 7187278d29427..2cf4954a15f58 100644 --- a/models/migrations/v1_14/v157.go +++ b/models/migrations/v1_14/v157.go @@ -4,58 +4,169 @@ package v1_14 //nolint import ( + "fmt" + "path/filepath" + "strings" + + "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func FixRepoTopics(x *xorm.Engine) error { - type Topic struct { //nolint:unused - ID int64 `xorm:"pk autoincr"` - Name string `xorm:"UNIQUE VARCHAR(25)"` - RepoCount int - } +// Copy paste from models/repo.go because we cannot import models package +func repoPath(userName, repoName string) string { + return filepath.Join(userPath(userName), strings.ToLower(repoName)+".git") +} - type RepoTopic struct { //nolint:unused - RepoID int64 `xorm:"pk"` - TopicID int64 `xorm:"pk"` +func userPath(userName string) string { + return filepath.Join(setting.RepoRootPath, strings.ToLower(userName)) +} + +func FixPublisherIDforTagReleases(x *xorm.Engine) error { + type Release struct { + ID int64 + RepoID int64 + Sha1 string + TagName string + PublisherID int64 } type Repository struct { - ID int64 `xorm:"pk autoincr"` - Topics []string `xorm:"TEXT JSON"` + ID int64 + OwnerID int64 + OwnerName string + Name string + } + + type User struct { + ID int64 + Name string + Email string } const batchSize = 100 sess := x.NewSession() defer sess.Close() - repos := make([]*Repository, 0, batchSize) - topics := make([]string, 0, batchSize) + + var ( + repo *Repository + gitRepo *git.Repository + user *User + ) + defer func() { + if gitRepo != nil { + gitRepo.Close() + } + }() for start := 0; ; start += batchSize { - repos = repos[:0] + releases := make([]*Release, 0, batchSize) if err := sess.Begin(); err != nil { return err } - if err := sess.Limit(batchSize, start).Find(&repos); err != nil { + if err := sess.Limit(batchSize, start). + Where("publisher_id = 0 OR publisher_id is null"). + Asc("repo_id", "id").Where("is_tag=?", true). + Find(&releases); err != nil { return err } - if len(repos) == 0 { + if len(releases) == 0 { break } - for _, repo := range repos { - topics = topics[:0] - if err := sess.Select("name").Table("topic"). - Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id"). - Where("repo_topic.repo_id = ?", repo.ID).Desc("topic.repo_count").Find(&topics); err != nil { - return err + for _, release := range releases { + if repo == nil || repo.ID != release.RepoID { + if gitRepo != nil { + gitRepo.Close() + gitRepo = nil + } + repo = new(Repository) + has, err := sess.ID(release.RepoID).Get(repo) + if err != nil { + log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err) + return err + } else if !has { + log.Warn("Release[%d] is orphaned and refers to non-existing repository %d", release.ID, release.RepoID) + log.Warn("This release should be deleted") + continue + } + + if repo.OwnerName == "" { + // v120.go migration may not have been run correctly - we'll just replicate it here + // because this appears to be a common-ish problem. 
+ if _, err := sess.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)"); err != nil { + log.Error("Error whilst updating repository[%d] owner name", repo.ID) + return err + } + + if _, err := sess.ID(release.RepoID).Get(repo); err != nil { + log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err) + return err + } + } + gitRepo, err = git.OpenRepository(git.DefaultContext, repoPath(repo.OwnerName, repo.Name)) + if err != nil { + log.Error("Error whilst opening git repo for [%d]%s/%s. Error: %v", repo.ID, repo.OwnerName, repo.Name, err) + return err + } } - repo.Topics = topics - if _, err := sess.ID(repo.ID).Cols("topics").Update(repo); err != nil { + + commit, err := gitRepo.GetTagCommit(release.TagName) + if err != nil { + if git.IsErrNotExist(err) { + log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name) + continue + } + log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err) + return fmt.Errorf("GetTagCommit: %w", err) + } + + if commit.Author.Email == "" { + log.Warn("Tag: %s in Repo[%d]%s/%s does not have a tagger.", release.TagName, repo.ID, repo.OwnerName, repo.Name) + commit, err = gitRepo.GetCommit(commit.ID.String()) + if err != nil { + if git.IsErrNotExist(err) { + log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name) + continue + } + log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err) + return fmt.Errorf("GetCommit: %w", err) + } + } + + if commit.Author.Email == "" { + log.Warn("Tag: %s in Repo[%d]%s/%s does not have a Tagger and its underlying commit does not have an Author either!", release.TagName, repo.ID, repo.OwnerName, repo.Name) + continue + } + + if user == nil || !strings.EqualFold(user.Email, commit.Author.Email) { + user = new(User) + _, err = sess.Where("email=?", commit.Author.Email).Get(user) + if err != nil { + log.Error("Error whilst getting commit author by email: %s for Tag: %s in [%d]%s/%s. Error: %v", commit.Author.Email, release.TagName, repo.ID, repo.OwnerName, repo.Name, err) + return err + } + + user.Email = commit.Author.Email + } + + if user.ID <= 0 { + continue + } + + release.PublisherID = user.ID + if _, err := sess.ID(release.ID).Cols("publisher_id").Update(release); err != nil { + log.Error("Error whilst updating publisher[%d] for release[%d] with tag name %s. 
Error: %v", release.PublisherID, release.ID, release.TagName, err) return err } } + if gitRepo != nil { + gitRepo.Close() + } if err := sess.Commit(); err != nil { return err diff --git a/models/migrations/v1_14/v158.go b/models/migrations/v1_14/v158.go index 2029829ff9901..7187278d29427 100644 --- a/models/migrations/v1_14/v158.go +++ b/models/migrations/v1_14/v158.go @@ -4,107 +4,62 @@ package v1_14 //nolint import ( - "fmt" - "strconv" - - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" - "xorm.io/xorm" ) -func UpdateCodeCommentReplies(x *xorm.Engine) error { - type Comment struct { - ID int64 `xorm:"pk autoincr"` - CommitSHA string `xorm:"VARCHAR(40)"` - Patch string `xorm:"TEXT patch"` - Invalidated bool - - // Not extracted but used in the below query - Type int `xorm:"INDEX"` - Line int64 // - previous line / + proposed line - TreePath string - ReviewID int64 `xorm:"index"` +func FixRepoTopics(x *xorm.Engine) error { + type Topic struct { //nolint:unused + ID int64 `xorm:"pk autoincr"` + Name string `xorm:"UNIQUE VARCHAR(25)"` + RepoCount int } - if err := x.Sync2(new(Comment)); err != nil { - return err + type RepoTopic struct { //nolint:unused + RepoID int64 `xorm:"pk"` + TopicID int64 `xorm:"pk"` } - sqlSelect := `SELECT comment.id as id, first.commit_sha as commit_sha, first.patch as patch, first.invalidated as invalidated` - sqlTail := ` FROM comment INNER JOIN ( - SELECT C.id, C.review_id, C.line, C.tree_path, C.patch, C.commit_sha, C.invalidated - FROM comment AS C - WHERE C.type = 21 - AND C.created_unix = - (SELECT MIN(comment.created_unix) - FROM comment - WHERE comment.review_id = C.review_id - AND comment.type = 21 - AND comment.line = C.line - AND comment.tree_path = C.tree_path) - ) AS first - ON comment.review_id = first.review_id - AND comment.tree_path = first.tree_path AND comment.line = first.line - WHERE comment.type = 21 - AND comment.id != first.id - AND comment.commit_sha != first.commit_sha` + type Repository struct { + ID int64 `xorm:"pk autoincr"` + Topics []string `xorm:"TEXT JSON"` + } - var ( - sqlCmd string - start = 0 - batchSize = 100 - sess = x.NewSession() - ) + const batchSize = 100 + sess := x.NewSession() defer sess.Close() - for { + repos := make([]*Repository, 0, batchSize) + topics := make([]string, 0, batchSize) + for start := 0; ; start += batchSize { + repos = repos[:0] + if err := sess.Begin(); err != nil { return err } - if setting.Database.Type.IsMSSQL() { - if _, err := sess.Exec(sqlSelect + " INTO #temp_comments" + sqlTail); err != nil { - log.Error("unable to create temporary table") - return err - } - } - - comments := make([]*Comment, 0, batchSize) - - switch { - case setting.Database.Type.IsMySQL(): - sqlCmd = sqlSelect + sqlTail + " LIMIT " + strconv.Itoa(batchSize) + ", " + strconv.Itoa(start) - case setting.Database.Type.IsPostgreSQL(): - fallthrough - case setting.Database.Type.IsSQLite3(): - sqlCmd = sqlSelect + sqlTail + " LIMIT " + strconv.Itoa(batchSize) + " OFFSET " + strconv.Itoa(start) - case setting.Database.Type.IsMSSQL(): - sqlCmd = "SELECT TOP " + strconv.Itoa(batchSize) + " * FROM #temp_comments WHERE " + - "(id NOT IN ( SELECT TOP " + strconv.Itoa(start) + " id FROM #temp_comments ORDER BY id )) ORDER BY id" - default: - return fmt.Errorf("Unsupported database type") + if err := sess.Limit(batchSize, start).Find(&repos); err != nil { + return err } - if err := sess.SQL(sqlCmd).Find(&comments); err != nil { - log.Error("failed to select: %v", err) - return err + if len(repos) == 0 { + break 
} - for _, comment := range comments { - if _, err := sess.Table("comment").ID(comment.ID).Cols("commit_sha", "patch", "invalidated").Update(comment); err != nil { - log.Error("failed to update comment[%d]: %v %v", comment.ID, comment, err) + for _, repo := range repos { + topics = topics[:0] + if err := sess.Select("name").Table("topic"). + Join("INNER", "repo_topic", "repo_topic.topic_id = topic.id"). + Where("repo_topic.repo_id = ?", repo.ID).Desc("topic.repo_count").Find(&topics); err != nil { + return err + } + repo.Topics = topics + if _, err := sess.ID(repo.ID).Cols("topics").Update(repo); err != nil { return err } } - start += len(comments) - if err := sess.Commit(); err != nil { return err } - if len(comments) < batchSize { - break - } } return nil diff --git a/models/migrations/v1_14/v159.go b/models/migrations/v1_14/v159.go index 149ae0f6a8e26..2029829ff9901 100644 --- a/models/migrations/v1_14/v159.go +++ b/models/migrations/v1_14/v159.go @@ -4,35 +4,108 @@ package v1_14 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/timeutil" + "fmt" + "strconv" + + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func UpdateReactionConstraint(x *xorm.Engine) error { - // Reaction represents a reactions on issues and comments. - type Reaction struct { - ID int64 `xorm:"pk autoincr"` - Type string `xorm:"INDEX UNIQUE(s) NOT NULL"` - IssueID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"` - CommentID int64 `xorm:"INDEX UNIQUE(s)"` - UserID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"` - OriginalAuthorID int64 `xorm:"INDEX UNIQUE(s) NOT NULL DEFAULT(0)"` - OriginalAuthor string `xorm:"INDEX UNIQUE(s)"` - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - } +func UpdateCodeCommentReplies(x *xorm.Engine) error { + type Comment struct { + ID int64 `xorm:"pk autoincr"` + CommitSHA string `xorm:"VARCHAR(40)"` + Patch string `xorm:"TEXT patch"` + Invalidated bool - sess := x.NewSession() - defer sess.Close() + // Not extracted but used in the below query + Type int `xorm:"INDEX"` + Line int64 // - previous line / + proposed line + TreePath string + ReviewID int64 `xorm:"index"` + } - if err := sess.Begin(); err != nil { + if err := x.Sync2(new(Comment)); err != nil { return err } - if err := base.RecreateTable(sess, &Reaction{}); err != nil { - return err + sqlSelect := `SELECT comment.id as id, first.commit_sha as commit_sha, first.patch as patch, first.invalidated as invalidated` + sqlTail := ` FROM comment INNER JOIN ( + SELECT C.id, C.review_id, C.line, C.tree_path, C.patch, C.commit_sha, C.invalidated + FROM comment AS C + WHERE C.type = 21 + AND C.created_unix = + (SELECT MIN(comment.created_unix) + FROM comment + WHERE comment.review_id = C.review_id + AND comment.type = 21 + AND comment.line = C.line + AND comment.tree_path = C.tree_path) + ) AS first + ON comment.review_id = first.review_id + AND comment.tree_path = first.tree_path AND comment.line = first.line + WHERE comment.type = 21 + AND comment.id != first.id + AND comment.commit_sha != first.commit_sha` + + var ( + sqlCmd string + start = 0 + batchSize = 100 + sess = x.NewSession() + ) + defer sess.Close() + for { + if err := sess.Begin(); err != nil { + return err + } + + if setting.Database.Type.IsMSSQL() { + if _, err := sess.Exec(sqlSelect + " INTO #temp_comments" + sqlTail); err != nil { + log.Error("unable to create temporary table") + return err + } + } + + comments := make([]*Comment, 0, batchSize) + + switch { + case 
setting.Database.Type.IsMySQL(): + sqlCmd = sqlSelect + sqlTail + " LIMIT " + strconv.Itoa(batchSize) + ", " + strconv.Itoa(start) + case setting.Database.Type.IsPostgreSQL(): + fallthrough + case setting.Database.Type.IsSQLite3(): + sqlCmd = sqlSelect + sqlTail + " LIMIT " + strconv.Itoa(batchSize) + " OFFSET " + strconv.Itoa(start) + case setting.Database.Type.IsMSSQL(): + sqlCmd = "SELECT TOP " + strconv.Itoa(batchSize) + " * FROM #temp_comments WHERE " + + "(id NOT IN ( SELECT TOP " + strconv.Itoa(start) + " id FROM #temp_comments ORDER BY id )) ORDER BY id" + default: + return fmt.Errorf("Unsupported database type") + } + + if err := sess.SQL(sqlCmd).Find(&comments); err != nil { + log.Error("failed to select: %v", err) + return err + } + + for _, comment := range comments { + if _, err := sess.Table("comment").ID(comment.ID).Cols("commit_sha", "patch", "invalidated").Update(comment); err != nil { + log.Error("failed to update comment[%d]: %v %v", comment.ID, comment, err) + return err + } + } + + start += len(comments) + + if err := sess.Commit(); err != nil { + return err + } + if len(comments) < batchSize { + break + } } - return sess.Commit() + return nil } diff --git a/models/migrations/v1_14/v160.go b/models/migrations/v1_14/v160.go index b9b7e7fbdd5d4..149ae0f6a8e26 100644 --- a/models/migrations/v1_14/v160.go +++ b/models/migrations/v1_14/v160.go @@ -4,13 +4,35 @@ package v1_14 //nolint import ( + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddBlockOnOfficialReviewRequests(x *xorm.Engine) error { - type ProtectedBranch struct { - BlockOnOfficialReviewRequests bool `xorm:"NOT NULL DEFAULT false"` +func UpdateReactionConstraint(x *xorm.Engine) error { + // Reaction represents a reactions on issues and comments. 
+ type Reaction struct { + ID int64 `xorm:"pk autoincr"` + Type string `xorm:"INDEX UNIQUE(s) NOT NULL"` + IssueID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"` + CommentID int64 `xorm:"INDEX UNIQUE(s)"` + UserID int64 `xorm:"INDEX UNIQUE(s) NOT NULL"` + OriginalAuthorID int64 `xorm:"INDEX UNIQUE(s) NOT NULL DEFAULT(0)"` + OriginalAuthor string `xorm:"INDEX UNIQUE(s)"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + } + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + if err := base.RecreateTable(sess, &Reaction{}); err != nil { + return err } - return x.Sync2(new(ProtectedBranch)) + return sess.Commit() } diff --git a/models/migrations/v1_14/v161.go b/models/migrations/v1_14/v161.go index ef3c14d595017..b9b7e7fbdd5d4 100644 --- a/models/migrations/v1_14/v161.go +++ b/models/migrations/v1_14/v161.go @@ -4,70 +4,13 @@ package v1_14 //nolint import ( - "context" - - "code.gitea.io/gitea/models/migrations/base" - "xorm.io/xorm" ) -func ConvertTaskTypeToString(x *xorm.Engine) error { - const ( - GOGS int = iota + 1 - SLACK - GITEA - DISCORD - DINGTALK - TELEGRAM - MSTEAMS - FEISHU - MATRIX - WECHATWORK - ) - - hookTaskTypes := map[int]string{ - GITEA: "gitea", - GOGS: "gogs", - SLACK: "slack", - DISCORD: "discord", - DINGTALK: "dingtalk", - TELEGRAM: "telegram", - MSTEAMS: "msteams", - FEISHU: "feishu", - MATRIX: "matrix", - WECHATWORK: "wechatwork", - } - - type HookTask struct { - Typ string `xorm:"VARCHAR(16) index"` - } - if err := x.Sync2(new(HookTask)); err != nil { - return err - } - - // to keep the migration could be rerun - exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "hook_task", "type") - if err != nil { - return err - } - if !exist { - return nil - } - - for i, s := range hookTaskTypes { - if _, err := x.Exec("UPDATE hook_task set typ = ? where `type`=?", s, i); err != nil { - return err - } - } - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - if err := base.DropTableColumns(sess, "hook_task", "type"); err != nil { - return err +func AddBlockOnOfficialReviewRequests(x *xorm.Engine) error { + type ProtectedBranch struct { + BlockOnOfficialReviewRequests bool `xorm:"NOT NULL DEFAULT false"` } - return sess.Commit() + return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_14/v162.go b/models/migrations/v1_14/v162.go index 972f771fd7b3a..ef3c14d595017 100644 --- a/models/migrations/v1_14/v162.go +++ b/models/migrations/v1_14/v162.go @@ -4,12 +4,14 @@ package v1_14 //nolint import ( + "context" + "code.gitea.io/gitea/models/migrations/base" "xorm.io/xorm" ) -func ConvertWebhookTaskTypeToString(x *xorm.Engine) error { +func ConvertTaskTypeToString(x *xorm.Engine) error { const ( GOGS int = iota + 1 SLACK @@ -36,15 +38,24 @@ func ConvertWebhookTaskTypeToString(x *xorm.Engine) error { WECHATWORK: "wechatwork", } - type Webhook struct { - Type string `xorm:"char(16) index"` + type HookTask struct { + Typ string `xorm:"VARCHAR(16) index"` } - if err := x.Sync2(new(Webhook)); err != nil { + if err := x.Sync2(new(HookTask)); err != nil { return err } + // to keep the migration could be rerun + exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "hook_task", "type") + if err != nil { + return err + } + if !exist { + return nil + } + for i, s := range hookTaskTypes { - if _, err := x.Exec("UPDATE webhook set type = ? where hook_task_type=?", s, i); err != nil { + if _, err := x.Exec("UPDATE hook_task set typ = ? 
where `type`=?", s, i); err != nil { return err } } @@ -54,7 +65,7 @@ func ConvertWebhookTaskTypeToString(x *xorm.Engine) error { if err := sess.Begin(); err != nil { return err } - if err := base.DropTableColumns(sess, "webhook", "hook_task_type"); err != nil { + if err := base.DropTableColumns(sess, "hook_task", "type"); err != nil { return err } diff --git a/models/migrations/v1_14/v163.go b/models/migrations/v1_14/v163.go index 22f55f48d1691..972f771fd7b3a 100644 --- a/models/migrations/v1_14/v163.go +++ b/models/migrations/v1_14/v163.go @@ -9,25 +9,52 @@ import ( "xorm.io/xorm" ) -func ConvertTopicNameFrom25To50(x *xorm.Engine) error { - type Topic struct { - ID int64 `xorm:"pk autoincr"` - Name string `xorm:"UNIQUE VARCHAR(50)"` - RepoCount int - CreatedUnix int64 `xorm:"INDEX created"` - UpdatedUnix int64 `xorm:"INDEX updated"` +func ConvertWebhookTaskTypeToString(x *xorm.Engine) error { + const ( + GOGS int = iota + 1 + SLACK + GITEA + DISCORD + DINGTALK + TELEGRAM + MSTEAMS + FEISHU + MATRIX + WECHATWORK + ) + + hookTaskTypes := map[int]string{ + GITEA: "gitea", + GOGS: "gogs", + SLACK: "slack", + DISCORD: "discord", + DINGTALK: "dingtalk", + TELEGRAM: "telegram", + MSTEAMS: "msteams", + FEISHU: "feishu", + MATRIX: "matrix", + WECHATWORK: "wechatwork", } - if err := x.Sync2(new(Topic)); err != nil { + type Webhook struct { + Type string `xorm:"char(16) index"` + } + if err := x.Sync2(new(Webhook)); err != nil { return err } + for i, s := range hookTaskTypes { + if _, err := x.Exec("UPDATE webhook set type = ? where hook_task_type=?", s, i); err != nil { + return err + } + } + sess := x.NewSession() defer sess.Close() if err := sess.Begin(); err != nil { return err } - if err := base.RecreateTable(sess, new(Topic)); err != nil { + if err := base.DropTableColumns(sess, "webhook", "hook_task_type"); err != nil { return err } diff --git a/models/migrations/v1_14/v164.go b/models/migrations/v1_14/v164.go index a6791b16e03d5..22f55f48d1691 100644 --- a/models/migrations/v1_14/v164.go +++ b/models/migrations/v1_14/v164.go @@ -4,34 +4,32 @@ package v1_14 //nolint import ( - "fmt" + "code.gitea.io/gitea/models/migrations/base" "xorm.io/xorm" ) -// OAuth2Grant here is a snapshot of models.OAuth2Grant for this version -// of the database, as it does not appear to have been added as a part -// of a previous migration. -type OAuth2Grant struct { - ID int64 `xorm:"pk autoincr"` - UserID int64 `xorm:"INDEX unique(user_application)"` - ApplicationID int64 `xorm:"INDEX unique(user_application)"` - Counter int64 `xorm:"NOT NULL DEFAULT 1"` - Scope string `xorm:"TEXT"` - Nonce string `xorm:"TEXT"` - CreatedUnix int64 `xorm:"created"` - UpdatedUnix int64 `xorm:"updated"` -} +func ConvertTopicNameFrom25To50(x *xorm.Engine) error { + type Topic struct { + ID int64 `xorm:"pk autoincr"` + Name string `xorm:"UNIQUE VARCHAR(50)"` + RepoCount int + CreatedUnix int64 `xorm:"INDEX created"` + UpdatedUnix int64 `xorm:"INDEX updated"` + } -// TableName sets the database table name to be the correct one, as the -// autogenerated table name for this struct is "o_auth2_grant". 
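// NOTE (not part of the diff): a hedged sketch of the rerunnable "convert an int column to a
// string column" pattern used by ConvertTaskTypeToString / ConvertWebhookTaskTypeToString above.
// Table and column names ("hook_task", "typ", "type") come from the diff; the value map is
// shortened here for brevity.
package v1_14 //nolint

import (
	"context"

	"code.gitea.io/gitea/models/migrations/base"

	"xorm.io/xorm"
)

func convertIntColumnToStringSketch(x *xorm.Engine) error {
	type HookTask struct {
		Typ string `xorm:"VARCHAR(16) index"` // the new string column
	}
	if err := x.Sync2(new(HookTask)); err != nil {
		return err
	}

	// Guard so the migration can be rerun: if the old integer column is already gone,
	// there is nothing left to convert.
	exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "hook_task", "type")
	if err != nil {
		return err
	}
	if !exist {
		return nil
	}

	// Backfill the new column from the old integer values (mapping shortened for illustration).
	for i, s := range map[int]string{1: "gogs", 2: "slack", 3: "gitea"} {
		if _, err := x.Exec("UPDATE hook_task SET typ = ? WHERE `type` = ?", s, i); err != nil {
			return err
		}
	}

	// Finally drop the old column inside a transaction.
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	if err := base.DropTableColumns(sess, "hook_task", "type"); err != nil {
		return err
	}
	return sess.Commit()
}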
-func (grant *OAuth2Grant) TableName() string { - return "oauth2_grant" -} + if err := x.Sync2(new(Topic)); err != nil { + return err + } -func AddScopeAndNonceColumnsToOAuth2Grant(x *xorm.Engine) error { - if err := x.Sync2(new(OAuth2Grant)); err != nil { - return fmt.Errorf("Sync2: %w", err) + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + if err := base.RecreateTable(sess, new(Topic)); err != nil { + return err } - return nil + + return sess.Commit() } diff --git a/models/migrations/v1_14/v165.go b/models/migrations/v1_14/v165.go index 926350cdf7803..a6791b16e03d5 100644 --- a/models/migrations/v1_14/v165.go +++ b/models/migrations/v1_14/v165.go @@ -4,66 +4,34 @@ package v1_14 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" + "fmt" "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -func ConvertHookTaskTypeToVarcharAndTrim(x *xorm.Engine) error { - dbType := x.Dialect().URI().DBType - if dbType == schemas.SQLITE { // For SQLITE, varchar or char will always be represented as TEXT - return nil - } - - type HookTask struct { //nolint:unused - Typ string `xorm:"VARCHAR(16) index"` - } - - if err := base.ModifyColumn(x, "hook_task", &schemas.Column{ - Name: "typ", - SQLType: schemas.SQLType{ - Name: "VARCHAR", - }, - Length: 16, - Nullable: true, // To keep compatible as nullable - DefaultIsEmpty: true, - }); err != nil { - return err - } - - var hookTaskTrimSQL string - if dbType == schemas.MSSQL { - hookTaskTrimSQL = "UPDATE hook_task SET typ = RTRIM(LTRIM(typ))" - } else { - hookTaskTrimSQL = "UPDATE hook_task SET typ = TRIM(typ)" - } - if _, err := x.Exec(hookTaskTrimSQL); err != nil { - return err - } - - type Webhook struct { //nolint:unused - Type string `xorm:"VARCHAR(16) index"` - } +// OAuth2Grant here is a snapshot of models.OAuth2Grant for this version +// of the database, as it does not appear to have been added as a part +// of a previous migration. +type OAuth2Grant struct { + ID int64 `xorm:"pk autoincr"` + UserID int64 `xorm:"INDEX unique(user_application)"` + ApplicationID int64 `xorm:"INDEX unique(user_application)"` + Counter int64 `xorm:"NOT NULL DEFAULT 1"` + Scope string `xorm:"TEXT"` + Nonce string `xorm:"TEXT"` + CreatedUnix int64 `xorm:"created"` + UpdatedUnix int64 `xorm:"updated"` +} - if err := base.ModifyColumn(x, "webhook", &schemas.Column{ - Name: "type", - SQLType: schemas.SQLType{ - Name: "VARCHAR", - }, - Length: 16, - Nullable: true, // To keep compatible as nullable - DefaultIsEmpty: true, - }); err != nil { - return err - } +// TableName sets the database table name to be the correct one, as the +// autogenerated table name for this struct is "o_auth2_grant". +func (grant *OAuth2Grant) TableName() string { + return "oauth2_grant" +} - var webhookTrimSQL string - if dbType == schemas.MSSQL { - webhookTrimSQL = "UPDATE webhook SET type = RTRIM(LTRIM(type))" - } else { - webhookTrimSQL = "UPDATE webhook SET type = TRIM(type)" +func AddScopeAndNonceColumnsToOAuth2Grant(x *xorm.Engine) error { + if err := x.Sync2(new(OAuth2Grant)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - _, err := x.Exec(webhookTrimSQL) - return err + return nil } diff --git a/models/migrations/v1_14/v166.go b/models/migrations/v1_14/v166.go index de7626076a95d..926350cdf7803 100644 --- a/models/migrations/v1_14/v166.go +++ b/models/migrations/v1_14/v166.go @@ -1,112 +1,69 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. +// Copyright 2020 The Gitea Authors. All rights reserved. 
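// NOTE (not part of the diff): a small sketch of the "snapshot struct" convention used by
// AddScopeAndNonceColumnsToOAuth2Grant above. The migration declares its own frozen copy of the
// model and overrides TableName() because xorm would otherwise derive "o_auth2_grant" from the
// struct name. The identifier names below are illustrative, not taken from the diff.
package v1_14 //nolint

import (
	"fmt"

	"xorm.io/xorm"
)

type oauth2GrantSnapshot struct { // trimmed-down snapshot for illustration
	ID    int64  `xorm:"pk autoincr"`
	Scope string `xorm:"TEXT"`
	Nonce string `xorm:"TEXT"`
}

// TableName forces the real table name instead of the auto-generated "o_auth2_grant_snapshot".
func (*oauth2GrantSnapshot) TableName() string {
	return "oauth2_grant"
}

func addGrantColumnsSketch(x *xorm.Engine) error {
	if err := x.Sync2(new(oauth2GrantSnapshot)); err != nil {
		return fmt.Errorf("Sync2: %w", err)
	}
	return nil
}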
// SPDX-License-Identifier: MIT package v1_14 //nolint import ( - "encoding/hex" + "code.gitea.io/gitea/models/migrations/base" - "github.com/minio/sha256-simd" - "golang.org/x/crypto/argon2" - "golang.org/x/crypto/bcrypt" - "golang.org/x/crypto/pbkdf2" - "golang.org/x/crypto/scrypt" - "xorm.io/builder" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func RecalculateUserEmptyPWD(x *xorm.Engine) (err error) { - const ( - algoBcrypt = "bcrypt" - algoScrypt = "scrypt" - algoArgon2 = "argon2" - algoPbkdf2 = "pbkdf2" - ) - - type User struct { - ID int64 `xorm:"pk autoincr"` - Passwd string `xorm:"NOT NULL"` - PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"` - MustChangePassword bool `xorm:"NOT NULL DEFAULT false"` - LoginType int - LoginName string - Type int - Salt string `xorm:"VARCHAR(10)"` +func ConvertHookTaskTypeToVarcharAndTrim(x *xorm.Engine) error { + dbType := x.Dialect().URI().DBType + if dbType == schemas.SQLITE { // For SQLITE, varchar or char will always be represented as TEXT + return nil } - // hashPassword hash password based on algo and salt - // state 461406070c - hashPassword := func(passwd, salt, algo string) string { - var tempPasswd []byte - - switch algo { - case algoBcrypt: - tempPasswd, _ = bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost) - return string(tempPasswd) - case algoScrypt: - tempPasswd, _ = scrypt.Key([]byte(passwd), []byte(salt), 65536, 16, 2, 50) - case algoArgon2: - tempPasswd = argon2.IDKey([]byte(passwd), []byte(salt), 2, 65536, 8, 50) - case algoPbkdf2: - fallthrough - default: - tempPasswd = pbkdf2.Key([]byte(passwd), []byte(salt), 10000, 50, sha256.New) - } - - return hex.EncodeToString(tempPasswd) + type HookTask struct { //nolint:unused + Typ string `xorm:"VARCHAR(16) index"` } - // ValidatePassword checks if given password matches the one belongs to the user. 
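// NOTE (not part of the diff): a hedged sketch of the multi-algorithm hash helper that
// RecalculateUserEmptyPWD (moved within this diff) uses to detect accounts whose stored hash is
// just the hash of the empty password. Algorithm parameters mirror the migration; crypto/sha256
// is used here where the migration imports the API-compatible github.com/minio/sha256-simd.
package v1_14 //nolint

import (
	"crypto/sha256"
	"encoding/hex"

	"golang.org/x/crypto/argon2"
	"golang.org/x/crypto/bcrypt"
	"golang.org/x/crypto/pbkdf2"
	"golang.org/x/crypto/scrypt"
)

func hashPasswordSketch(passwd, salt, algo string) string {
	var buf []byte
	switch algo {
	case "bcrypt":
		buf, _ = bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost)
		return string(buf) // bcrypt output embeds its own salt and cost
	case "scrypt":
		buf, _ = scrypt.Key([]byte(passwd), []byte(salt), 65536, 16, 2, 50)
	case "argon2":
		buf = argon2.IDKey([]byte(passwd), []byte(salt), 2, 65536, 8, 50)
	default: // pbkdf2
		buf = pbkdf2.Key([]byte(passwd), []byte(salt), 10000, 50, sha256.New)
	}
	return hex.EncodeToString(buf)
}

// isEmptyPassword reports whether the stored hash corresponds to the empty password "".
func isEmptyPassword(storedHash, salt, algo string) bool {
	if algo == "bcrypt" {
		return bcrypt.CompareHashAndPassword([]byte(storedHash), []byte("")) == nil
	}
	return storedHash == hashPasswordSketch("", salt, algo)
}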
- // state 461406070c, changed since it's not necessary to be time constant - ValidatePassword := func(u *User, passwd string) bool { - tempHash := hashPassword(passwd, u.Salt, u.PasswdHashAlgo) - - if u.PasswdHashAlgo != algoBcrypt && u.Passwd == tempHash { - return true - } - if u.PasswdHashAlgo == algoBcrypt && bcrypt.CompareHashAndPassword([]byte(u.Passwd), []byte(passwd)) == nil { - return true - } - return false + if err := base.ModifyColumn(x, "hook_task", &schemas.Column{ + Name: "typ", + SQLType: schemas.SQLType{ + Name: "VARCHAR", + }, + Length: 16, + Nullable: true, // To keep compatible as nullable + DefaultIsEmpty: true, + }); err != nil { + return err } - sess := x.NewSession() - defer sess.Close() - - const batchSize = 100 - - for start := 0; ; start += batchSize { - users := make([]*User, 0, batchSize) - if err = sess.Limit(batchSize, start).Where(builder.Neq{"passwd": ""}, 0).Find(&users); err != nil { - return - } - if len(users) == 0 { - break - } - - if err = sess.Begin(); err != nil { - return - } - - for _, user := range users { - if ValidatePassword(user, "") { - user.Passwd = "" - user.Salt = "" - user.PasswdHashAlgo = "" - if _, err = sess.ID(user.ID).Cols("passwd", "salt", "passwd_hash_algo").Update(user); err != nil { - return err - } - } - } + var hookTaskTrimSQL string + if dbType == schemas.MSSQL { + hookTaskTrimSQL = "UPDATE hook_task SET typ = RTRIM(LTRIM(typ))" + } else { + hookTaskTrimSQL = "UPDATE hook_task SET typ = TRIM(typ)" + } + if _, err := x.Exec(hookTaskTrimSQL); err != nil { + return err + } - if err = sess.Commit(); err != nil { - return - } + type Webhook struct { //nolint:unused + Type string `xorm:"VARCHAR(16) index"` } - // delete salt and algo where password is empty - _, err = sess.Where(builder.Eq{"passwd": ""}.And(builder.Neq{"salt": ""}.Or(builder.Neq{"passwd_hash_algo": ""}))). 
- Cols("salt", "passwd_hash_algo").Update(&User{}) + if err := base.ModifyColumn(x, "webhook", &schemas.Column{ + Name: "type", + SQLType: schemas.SQLType{ + Name: "VARCHAR", + }, + Length: 16, + Nullable: true, // To keep compatible as nullable + DefaultIsEmpty: true, + }); err != nil { + return err + } + var webhookTrimSQL string + if dbType == schemas.MSSQL { + webhookTrimSQL = "UPDATE webhook SET type = RTRIM(LTRIM(type))" + } else { + webhookTrimSQL = "UPDATE webhook SET type = TRIM(type)" + } + _, err := x.Exec(webhookTrimSQL) return err } diff --git a/models/migrations/v1_14/v167.go b/models/migrations/v1_14/v167.go index fb6b3b474af92..de7626076a95d 100644 --- a/models/migrations/v1_14/v167.go +++ b/models/migrations/v1_14/v167.go @@ -4,20 +4,109 @@ package v1_14 //nolint import ( - "fmt" + "encoding/hex" + "github.com/minio/sha256-simd" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/pbkdf2" + "golang.org/x/crypto/scrypt" + "xorm.io/builder" "xorm.io/xorm" ) -func AddUserRedirect(x *xorm.Engine) (err error) { - type UserRedirect struct { - ID int64 `xorm:"pk autoincr"` - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - RedirectUserID int64 +func RecalculateUserEmptyPWD(x *xorm.Engine) (err error) { + const ( + algoBcrypt = "bcrypt" + algoScrypt = "scrypt" + algoArgon2 = "argon2" + algoPbkdf2 = "pbkdf2" + ) + + type User struct { + ID int64 `xorm:"pk autoincr"` + Passwd string `xorm:"NOT NULL"` + PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'argon2'"` + MustChangePassword bool `xorm:"NOT NULL DEFAULT false"` + LoginType int + LoginName string + Type int + Salt string `xorm:"VARCHAR(10)"` + } + + // hashPassword hash password based on algo and salt + // state 461406070c + hashPassword := func(passwd, salt, algo string) string { + var tempPasswd []byte + + switch algo { + case algoBcrypt: + tempPasswd, _ = bcrypt.GenerateFromPassword([]byte(passwd), bcrypt.DefaultCost) + return string(tempPasswd) + case algoScrypt: + tempPasswd, _ = scrypt.Key([]byte(passwd), []byte(salt), 65536, 16, 2, 50) + case algoArgon2: + tempPasswd = argon2.IDKey([]byte(passwd), []byte(salt), 2, 65536, 8, 50) + case algoPbkdf2: + fallthrough + default: + tempPasswd = pbkdf2.Key([]byte(passwd), []byte(salt), 10000, 50, sha256.New) + } + + return hex.EncodeToString(tempPasswd) } - if err := x.Sync2(new(UserRedirect)); err != nil { - return fmt.Errorf("Sync2: %w", err) + // ValidatePassword checks if given password matches the one belongs to the user. 
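// NOTE (not part of the diff): a sketch of the "shrink a column to VARCHAR(16) and trim padding"
// step from ConvertHookTaskTypeToVarcharAndTrim above, generalized over the table/column name.
// SQLite is skipped because it stores VARCHAR as TEXT anyway, and MSSQL uses RTRIM(LTRIM(...))
// because older versions lack TRIM().
package v1_14 //nolint

import (
	"code.gitea.io/gitea/models/migrations/base"

	"xorm.io/xorm"
	"xorm.io/xorm/schemas"
)

func shrinkAndTrimColumnSketch(x *xorm.Engine, table, column string) error {
	dbType := x.Dialect().URI().DBType
	if dbType == schemas.SQLITE {
		return nil
	}

	if err := base.ModifyColumn(x, table, &schemas.Column{
		Name:           column,
		SQLType:        schemas.SQLType{Name: "VARCHAR"},
		Length:         16,
		Nullable:       true, // keep compatible as nullable
		DefaultIsEmpty: true,
	}); err != nil {
		return err
	}

	trimSQL := "UPDATE " + table + " SET " + column + " = TRIM(" + column + ")"
	if dbType == schemas.MSSQL {
		trimSQL = "UPDATE " + table + " SET " + column + " = RTRIM(LTRIM(" + column + "))"
	}
	_, err := x.Exec(trimSQL)
	return err
}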
+ // state 461406070c, changed since it's not necessary to be time constant + ValidatePassword := func(u *User, passwd string) bool { + tempHash := hashPassword(passwd, u.Salt, u.PasswdHashAlgo) + + if u.PasswdHashAlgo != algoBcrypt && u.Passwd == tempHash { + return true + } + if u.PasswdHashAlgo == algoBcrypt && bcrypt.CompareHashAndPassword([]byte(u.Passwd), []byte(passwd)) == nil { + return true + } + return false } - return nil + + sess := x.NewSession() + defer sess.Close() + + const batchSize = 100 + + for start := 0; ; start += batchSize { + users := make([]*User, 0, batchSize) + if err = sess.Limit(batchSize, start).Where(builder.Neq{"passwd": ""}, 0).Find(&users); err != nil { + return + } + if len(users) == 0 { + break + } + + if err = sess.Begin(); err != nil { + return + } + + for _, user := range users { + if ValidatePassword(user, "") { + user.Passwd = "" + user.Salt = "" + user.PasswdHashAlgo = "" + if _, err = sess.ID(user.ID).Cols("passwd", "salt", "passwd_hash_algo").Update(user); err != nil { + return err + } + } + } + + if err = sess.Commit(); err != nil { + return + } + } + + // delete salt and algo where password is empty + _, err = sess.Where(builder.Eq{"passwd": ""}.And(builder.Neq{"salt": ""}.Or(builder.Neq{"passwd_hash_algo": ""}))). + Cols("salt", "passwd_hash_algo").Update(&User{}) + + return err } diff --git a/models/migrations/v1_14/v168.go b/models/migrations/v1_14/v168.go index a30a8859f7fea..fb6b3b474af92 100644 --- a/models/migrations/v1_14/v168.go +++ b/models/migrations/v1_14/v168.go @@ -3,8 +3,21 @@ package v1_14 //nolint -import "xorm.io/xorm" +import ( + "fmt" -func RecreateUserTableToFixDefaultValues(_ *xorm.Engine) error { + "xorm.io/xorm" +) + +func AddUserRedirect(x *xorm.Engine) (err error) { + type UserRedirect struct { + ID int64 `xorm:"pk autoincr"` + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + RedirectUserID int64 + } + + if err := x.Sync2(new(UserRedirect)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } return nil } diff --git a/models/migrations/v1_14/v169.go b/models/migrations/v1_14/v169.go index 5b81bb58b199c..a30a8859f7fea 100644 --- a/models/migrations/v1_14/v169.go +++ b/models/migrations/v1_14/v169.go @@ -3,11 +3,8 @@ package v1_14 //nolint -import ( - "xorm.io/xorm" -) +import "xorm.io/xorm" -func CommentTypeDeleteBranchUseOldRef(x *xorm.Engine) error { - _, err := x.Exec("UPDATE comment SET old_ref = commit_sha, commit_sha = '' WHERE type = 11") - return err +func RecreateUserTableToFixDefaultValues(_ *xorm.Engine) error { + return nil } diff --git a/models/migrations/v1_14/v170.go b/models/migrations/v1_14/v170.go index adb28cae5e6c4..5b81bb58b199c 100644 --- a/models/migrations/v1_14/v170.go +++ b/models/migrations/v1_14/v170.go @@ -4,18 +4,10 @@ package v1_14 //nolint import ( - "fmt" - "xorm.io/xorm" ) -func AddDismissedReviewColumn(x *xorm.Engine) error { - type Review struct { - Dismissed bool `xorm:"NOT NULL DEFAULT false"` - } - - if err := x.Sync2(new(Review)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return nil +func CommentTypeDeleteBranchUseOldRef(x *xorm.Engine) error { + _, err := x.Exec("UPDATE comment SET old_ref = commit_sha, commit_sha = '' WHERE type = 11") + return err } diff --git a/models/migrations/v1_14/v171.go b/models/migrations/v1_14/v171.go index 35769342e8755..adb28cae5e6c4 100644 --- a/models/migrations/v1_14/v171.go +++ b/models/migrations/v1_14/v171.go @@ -9,12 +9,12 @@ import ( "xorm.io/xorm" ) -func AddSortingColToProjectBoard(x *xorm.Engine) error { - type 
ProjectBoard struct { - Sorting int8 `xorm:"NOT NULL DEFAULT 0"` +func AddDismissedReviewColumn(x *xorm.Engine) error { + type Review struct { + Dismissed bool `xorm:"NOT NULL DEFAULT false"` } - if err := x.Sync2(new(ProjectBoard)); err != nil { + if err := x.Sync2(new(Review)); err != nil { return fmt.Errorf("Sync2: %w", err) } return nil diff --git a/models/migrations/v1_14/v172.go b/models/migrations/v1_14/v172.go index 76519b94579ae..35769342e8755 100644 --- a/models/migrations/v1_14/v172.go +++ b/models/migrations/v1_14/v172.go @@ -1,19 +1,21 @@ -// Copyright 2020 The Gitea Authors. All rights reserved. +// Copyright 2021 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_14 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" + "fmt" "xorm.io/xorm" ) -func AddSessionTable(x *xorm.Engine) error { - type Session struct { - Key string `xorm:"pk CHAR(16)"` - Data []byte `xorm:"BLOB"` - Expiry timeutil.TimeStamp +func AddSortingColToProjectBoard(x *xorm.Engine) error { + type ProjectBoard struct { + Sorting int8 `xorm:"NOT NULL DEFAULT 0"` } - return x.Sync2(new(Session)) + + if err := x.Sync2(new(ProjectBoard)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + return nil } diff --git a/models/migrations/v1_14/v173.go b/models/migrations/v1_14/v173.go index 3b7ecb8f9d8fd..76519b94579ae 100644 --- a/models/migrations/v1_14/v173.go +++ b/models/migrations/v1_14/v173.go @@ -1,21 +1,19 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. +// Copyright 2020 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_14 //nolint import ( - "fmt" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func AddTimeIDCommentColumn(x *xorm.Engine) error { - type Comment struct { - TimeID int64 +func AddSessionTable(x *xorm.Engine) error { + type Session struct { + Key string `xorm:"pk CHAR(16)"` + Data []byte `xorm:"BLOB"` + Expiry timeutil.TimeStamp } - - if err := x.Sync2(new(Comment)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return nil + return x.Sync2(new(Session)) } diff --git a/models/migrations/v1_14/v174.go b/models/migrations/v1_14/v174.go index 766d3a4208e4b..3b7ecb8f9d8fd 100644 --- a/models/migrations/v1_14/v174.go +++ b/models/migrations/v1_14/v174.go @@ -9,26 +9,13 @@ import ( "xorm.io/xorm" ) -func AddRepoTransfer(x *xorm.Engine) error { - type RepoTransfer struct { - ID int64 `xorm:"pk autoincr"` - DoerID int64 - RecipientID int64 - RepoID int64 - TeamIDs []int64 - CreatedUnix int64 `xorm:"INDEX NOT NULL created"` - UpdatedUnix int64 `xorm:"INDEX NOT NULL updated"` +func AddTimeIDCommentColumn(x *xorm.Engine) error { + type Comment struct { + TimeID int64 } - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Sync2(new(RepoTransfer)); err != nil { + if err := x.Sync2(new(Comment)); err != nil { return fmt.Errorf("Sync2: %w", err) } - - return sess.Commit() + return nil } diff --git a/models/migrations/v1_14/v175.go b/models/migrations/v1_14/v175.go index 70d72b2600337..766d3a4208e4b 100644 --- a/models/migrations/v1_14/v175.go +++ b/models/migrations/v1_14/v175.go @@ -5,17 +5,19 @@ package v1_14 //nolint import ( "fmt" - "regexp" - - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func FixPostgresIDSequences(x *xorm.Engine) error { - if !setting.Database.Type.IsPostgreSQL() { - return nil +func AddRepoTransfer(x *xorm.Engine) error { + type RepoTransfer struct { + ID int64 
`xorm:"pk autoincr"` + DoerID int64 + RecipientID int64 + RepoID int64 + TeamIDs []int64 + CreatedUnix int64 `xorm:"INDEX NOT NULL created"` + UpdatedUnix int64 `xorm:"INDEX NOT NULL updated"` } sess := x.NewSession() @@ -24,29 +26,8 @@ func FixPostgresIDSequences(x *xorm.Engine) error { return err } - var sequences []string - schema := sess.Engine().Dialect().URI().Schema - - sess.Engine().SetSchema("") - if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__%_id_seq%' AND sequence_catalog = ?", setting.Database.Name).Find(&sequences); err != nil { - log.Error("Unable to find sequences: %v", err) - return err - } - sess.Engine().SetSchema(schema) - - sequenceRegexp := regexp.MustCompile(`tmp_recreate__(\w+)_id_seq.*`) - - for _, sequence := range sequences { - tableName := sequenceRegexp.FindStringSubmatch(sequence)[1] - newSequenceName := tableName + "_id_seq" - if _, err := sess.Exec(fmt.Sprintf("ALTER SEQUENCE `%s` RENAME TO `%s`", sequence, newSequenceName)); err != nil { - log.Error("Unable to rename %s to %s. Error: %v", sequence, newSequenceName, err) - return err - } - if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM `%s`), 1), false)", newSequenceName, tableName)); err != nil { - log.Error("Unable to reset sequence %s for %s. Error: %v", newSequenceName, tableName, err) - return err - } + if err := sess.Sync2(new(RepoTransfer)); err != nil { + return fmt.Errorf("Sync2: %w", err) } return sess.Commit() diff --git a/models/migrations/v1_14/v176.go b/models/migrations/v1_14/v176.go index bd2484e49d7b7..70d72b2600337 100644 --- a/models/migrations/v1_14/v176.go +++ b/models/migrations/v1_14/v176.go @@ -4,73 +4,50 @@ package v1_14 //nolint import ( - "xorm.io/xorm" -) + "fmt" + "regexp" -// RemoveInvalidLabels looks through the database to look for comments and issue_labels -// that refer to labels do not belong to the repository or organization that repository -// that the issue is in -func RemoveInvalidLabels(x *xorm.Engine) error { - type Comment struct { - ID int64 `xorm:"pk autoincr"` - Type int `xorm:"INDEX"` - IssueID int64 `xorm:"INDEX"` - LabelID int64 - } + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" - type Issue struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"` - Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository. 
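// NOTE (not part of the diff): several migrations in this patch (RecalculateUserEmptyPWD,
// DeleteMigrationCredentials, AddPrimaryEmail2EmailAddress) share the same batching idiom.
// A generic, hedged sketch of it with a placeholder bean, condition, and per-row update:
package v1_14 //nolint

import (
	"xorm.io/builder"
	"xorm.io/xorm"
)

func batchUpdateSketch(x *xorm.Engine) error {
	type User struct { // minimal bean, for illustration only
		ID     int64 `xorm:"pk autoincr"`
		Passwd string
	}

	sess := x.NewSession()
	defer sess.Close()

	const batchSize = 100
	for start := 0; ; start += batchSize {
		users := make([]*User, 0, batchSize)
		if err := sess.Limit(batchSize, start).Where(builder.Neq{"passwd": ""}).Find(&users); err != nil {
			return err
		}
		if len(users) == 0 {
			break
		}

		// Commit each page in its own transaction so a failure only loses one batch.
		if err := sess.Begin(); err != nil {
			return err
		}
		for _, u := range users {
			// per-row work goes here, e.g. rewriting u.Passwd
			if _, err := sess.ID(u.ID).Cols("passwd").Update(u); err != nil {
				return err
			}
		}
		if err := sess.Commit(); err != nil {
			return err
		}
	}
	return nil
}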
- } + "xorm.io/xorm" +) - type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) index"` - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` +func FixPostgresIDSequences(x *xorm.Engine) error { + if !setting.Database.Type.IsPostgreSQL() { + return nil } - type Label struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX"` - OrgID int64 `xorm:"INDEX"` + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err } - type IssueLabel struct { - ID int64 `xorm:"pk autoincr"` - IssueID int64 `xorm:"UNIQUE(s)"` - LabelID int64 `xorm:"UNIQUE(s)"` - } + var sequences []string + schema := sess.Engine().Dialect().URI().Schema - if err := x.Sync2(new(Comment), new(Issue), new(Repository), new(Label), new(IssueLabel)); err != nil { + sess.Engine().SetSchema("") + if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__%_id_seq%' AND sequence_catalog = ?", setting.Database.Name).Find(&sequences); err != nil { + log.Error("Unable to find sequences: %v", err) return err } + sess.Engine().SetSchema(schema) - if _, err := x.Exec(`DELETE FROM issue_label WHERE issue_label.id IN ( - SELECT il_too.id FROM ( - SELECT il_too_too.id - FROM issue_label AS il_too_too - INNER JOIN label ON il_too_too.label_id = label.id - INNER JOIN issue on issue.id = il_too_too.issue_id - INNER JOIN repository on repository.id = issue.repo_id - WHERE - (label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id) - ) AS il_too )`); err != nil { - return err - } + sequenceRegexp := regexp.MustCompile(`tmp_recreate__(\w+)_id_seq.*`) - if _, err := x.Exec(`DELETE FROM comment WHERE comment.id IN ( - SELECT il_too.id FROM ( - SELECT com.id - FROM comment AS com - INNER JOIN label ON com.label_id = label.id - INNER JOIN issue on issue.id = com.issue_id - INNER JOIN repository on repository.id = issue.repo_id - WHERE - com.type = ? AND ((label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id)) - ) AS il_too)`, 7); err != nil { - return err + for _, sequence := range sequences { + tableName := sequenceRegexp.FindStringSubmatch(sequence)[1] + newSequenceName := tableName + "_id_seq" + if _, err := sess.Exec(fmt.Sprintf("ALTER SEQUENCE `%s` RENAME TO `%s`", sequence, newSequenceName)); err != nil { + log.Error("Unable to rename %s to %s. Error: %v", sequence, newSequenceName, err) + return err + } + if _, err := sess.Exec(fmt.Sprintf("SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM `%s`), 1), false)", newSequenceName, tableName)); err != nil { + log.Error("Unable to reset sequence %s for %s. Error: %v", newSequenceName, tableName, err) + return err + } } - return nil + return sess.Commit() } diff --git a/models/migrations/v1_14/v176_test.go b/models/migrations/v1_14/v176_test.go deleted file mode 100644 index ea3e750d7f953..0000000000000 --- a/models/migrations/v1_14/v176_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. 
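// NOTE (not part of the diff): a hedged sketch of the PostgreSQL-only cleanup done by
// FixPostgresIDSequences above. Sequences left behind as tmp_recreate__<table>_id_seq by earlier
// RecreateTable runs are renamed back to <table>_id_seq and re-pointed at MAX(id)+1. The helper
// below handles a single sequence name; the migration feeds it from information_schema.sequences.
package v1_14 //nolint

import (
	"fmt"
	"regexp"

	"code.gitea.io/gitea/modules/setting"

	"xorm.io/xorm"
)

func fixOneSequenceSketch(x *xorm.Engine, sequence string) error {
	if !setting.Database.Type.IsPostgreSQL() {
		return nil // the temporary sequences only exist on PostgreSQL
	}

	re := regexp.MustCompile(`tmp_recreate__(\w+)_id_seq.*`)
	m := re.FindStringSubmatch(sequence)
	if m == nil {
		return fmt.Errorf("unexpected sequence name: %s", sequence)
	}
	tableName := m[1]
	newName := tableName + "_id_seq"

	if _, err := x.Exec(fmt.Sprintf("ALTER SEQUENCE `%s` RENAME TO `%s`", sequence, newName)); err != nil {
		return err
	}
	// Reset the sequence so the next nextval() yields MAX(id)+1 (or 1 for an empty table).
	_, err := x.Exec(fmt.Sprintf("SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM `%s`), 1), false)", newName, tableName))
	return err
}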
-// SPDX-License-Identifier: MIT - -package v1_14 //nolint - -import ( - "testing" - - "code.gitea.io/gitea/models/migrations/base" - - "github.com/stretchr/testify/assert" -) - -func Test_RemoveInvalidLabels(t *testing.T) { - // Models used by the migration - type Comment struct { - ID int64 `xorm:"pk autoincr"` - Type int `xorm:"INDEX"` - IssueID int64 `xorm:"INDEX"` - LabelID int64 - ShouldRemain bool // <- Flag for testing the migration - } - - type Issue struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"` - Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository. - } - - type Repository struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) index"` - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - } - - type Label struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX"` - OrgID int64 `xorm:"INDEX"` - } - - type IssueLabel struct { - ID int64 `xorm:"pk autoincr"` - IssueID int64 `xorm:"UNIQUE(s)"` - LabelID int64 `xorm:"UNIQUE(s)"` - ShouldRemain bool // <- Flag for testing the migration - } - - // load and prepare the test database - x, deferable := base.PrepareTestEnv(t, 0, new(Comment), new(Issue), new(Repository), new(IssueLabel), new(Label)) - if x == nil || t.Failed() { - defer deferable() - return - } - defer deferable() - - var issueLabels []*IssueLabel - ilPreMigration := map[int64]*IssueLabel{} - ilPostMigration := map[int64]*IssueLabel{} - - var comments []*Comment - comPreMigration := map[int64]*Comment{} - comPostMigration := map[int64]*Comment{} - - // Get pre migration values - if err := x.Find(&issueLabels); err != nil { - t.Errorf("Unable to find issueLabels: %v", err) - return - } - for _, issueLabel := range issueLabels { - ilPreMigration[issueLabel.ID] = issueLabel - } - if err := x.Find(&comments); err != nil { - t.Errorf("Unable to find comments: %v", err) - return - } - for _, comment := range comments { - comPreMigration[comment.ID] = comment - } - - // Run the migration - if err := RemoveInvalidLabels(x); err != nil { - t.Errorf("unable to RemoveInvalidLabels: %v", err) - } - - // Get the post migration values - issueLabels = issueLabels[:0] - if err := x.Find(&issueLabels); err != nil { - t.Errorf("Unable to find issueLabels: %v", err) - return - } - for _, issueLabel := range issueLabels { - ilPostMigration[issueLabel.ID] = issueLabel - } - comments = comments[:0] - if err := x.Find(&comments); err != nil { - t.Errorf("Unable to find comments: %v", err) - return - } - for _, comment := range comments { - comPostMigration[comment.ID] = comment - } - - // Finally test results of the migration - for id, comment := range comPreMigration { - post, ok := comPostMigration[id] - if ok { - if !comment.ShouldRemain { - t.Errorf("Comment[%d] remained but should have been deleted", id) - } - assert.Equal(t, comment, post) - } else if comment.ShouldRemain { - t.Errorf("Comment[%d] was deleted but should have remained", id) - } - } - - for id, il := range ilPreMigration { - post, ok := ilPostMigration[id] - if ok { - if !il.ShouldRemain { - t.Errorf("IssueLabel[%d] remained but should have been deleted", id) - } - assert.Equal(t, il, post) - } else if il.ShouldRemain { - t.Errorf("IssueLabel[%d] was deleted but should have remained", id) - } - } -} diff --git a/models/migrations/v1_14/v177.go b/models/migrations/v1_14/v177.go index e72a9e53a95bd..bd2484e49d7b7 100644 --- a/models/migrations/v1_14/v177.go +++ b/models/migrations/v1_14/v177.go @@ -4,39 +4,73 @@ package v1_14 //nolint 
import ( - "fmt" - "xorm.io/xorm" ) -// DeleteOrphanedIssueLabels looks through the database for issue_labels where the label no longer exists and deletes them. -func DeleteOrphanedIssueLabels(x *xorm.Engine) error { +// RemoveInvalidLabels looks through the database to look for comments and issue_labels +// that refer to labels do not belong to the repository or organization that repository +// that the issue is in +func RemoveInvalidLabels(x *xorm.Engine) error { + type Comment struct { + ID int64 `xorm:"pk autoincr"` + Type int `xorm:"INDEX"` + IssueID int64 `xorm:"INDEX"` + LabelID int64 + } + + type Issue struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"` + Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository. + } + + type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) index"` + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + } + + type Label struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX"` + OrgID int64 `xorm:"INDEX"` + } + type IssueLabel struct { ID int64 `xorm:"pk autoincr"` IssueID int64 `xorm:"UNIQUE(s)"` LabelID int64 `xorm:"UNIQUE(s)"` } - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { + if err := x.Sync2(new(Comment), new(Issue), new(Repository), new(Label), new(IssueLabel)); err != nil { return err } - if err := sess.Sync2(new(IssueLabel)); err != nil { - return fmt.Errorf("Sync2: %w", err) + if _, err := x.Exec(`DELETE FROM issue_label WHERE issue_label.id IN ( + SELECT il_too.id FROM ( + SELECT il_too_too.id + FROM issue_label AS il_too_too + INNER JOIN label ON il_too_too.label_id = label.id + INNER JOIN issue on issue.id = il_too_too.issue_id + INNER JOIN repository on repository.id = issue.repo_id + WHERE + (label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id) + ) AS il_too )`); err != nil { + return err } - if _, err := sess.Exec(`DELETE FROM issue_label WHERE issue_label.id IN ( - SELECT ill.id FROM ( - SELECT il.id - FROM issue_label AS il - LEFT JOIN label ON il.label_id = label.id - WHERE - label.id IS NULL - ) AS ill)`); err != nil { + if _, err := x.Exec(`DELETE FROM comment WHERE comment.id IN ( + SELECT il_too.id FROM ( + SELECT com.id + FROM comment AS com + INNER JOIN label ON com.label_id = label.id + INNER JOIN issue on issue.id = com.issue_id + INNER JOIN repository on repository.id = issue.repo_id + WHERE + com.type = ? 
AND ((label.org_id = 0 AND issue.repo_id != label.repo_id) OR (label.repo_id = 0 AND label.org_id != repository.owner_id)) + ) AS il_too)`, 7); err != nil { return err } - return sess.Commit() + return nil } diff --git a/models/migrations/v1_14/v177_test.go b/models/migrations/v1_14/v177_test.go index 5568a18fec0d4..ea3e750d7f953 100644 --- a/models/migrations/v1_14/v177_test.go +++ b/models/migrations/v1_14/v177_test.go @@ -7,34 +7,47 @@ import ( "testing" "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/timeutil" "github.com/stretchr/testify/assert" ) -func Test_DeleteOrphanedIssueLabels(t *testing.T) { - // Create the models used in the migration - type IssueLabel struct { - ID int64 `xorm:"pk autoincr"` - IssueID int64 `xorm:"UNIQUE(s)"` - LabelID int64 `xorm:"UNIQUE(s)"` +func Test_RemoveInvalidLabels(t *testing.T) { + // Models used by the migration + type Comment struct { + ID int64 `xorm:"pk autoincr"` + Type int `xorm:"INDEX"` + IssueID int64 `xorm:"INDEX"` + LabelID int64 + ShouldRemain bool // <- Flag for testing the migration + } + + type Issue struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX UNIQUE(repo_index)"` + Index int64 `xorm:"UNIQUE(repo_index)"` // Index in one repository. + } + + type Repository struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) index"` + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` } type Label struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX"` - OrgID int64 `xorm:"INDEX"` - Name string - Description string - Color string `xorm:"VARCHAR(7)"` - NumIssues int - NumClosedIssues int - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - - // Prepare and load the testing database - x, deferable := base.PrepareTestEnv(t, 0, new(IssueLabel), new(Label)) + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX"` + OrgID int64 `xorm:"INDEX"` + } + + type IssueLabel struct { + ID int64 `xorm:"pk autoincr"` + IssueID int64 `xorm:"UNIQUE(s)"` + LabelID int64 `xorm:"UNIQUE(s)"` + ShouldRemain bool // <- Flag for testing the migration + } + + // load and prepare the test database + x, deferable := base.PrepareTestEnv(t, 0, new(Comment), new(Issue), new(Repository), new(IssueLabel), new(Label)) if x == nil || t.Failed() { defer deferable() return @@ -42,47 +55,74 @@ func Test_DeleteOrphanedIssueLabels(t *testing.T) { defer deferable() var issueLabels []*IssueLabel - preMigration := map[int64]*IssueLabel{} - postMigration := map[int64]*IssueLabel{} + ilPreMigration := map[int64]*IssueLabel{} + ilPostMigration := map[int64]*IssueLabel{} + + var comments []*Comment + comPreMigration := map[int64]*Comment{} + comPostMigration := map[int64]*Comment{} - // Load issue labels that exist in the database pre-migration + // Get pre migration values if err := x.Find(&issueLabels); err != nil { - assert.NoError(t, err) + t.Errorf("Unable to find issueLabels: %v", err) return } for _, issueLabel := range issueLabels { - preMigration[issueLabel.ID] = issueLabel + ilPreMigration[issueLabel.ID] = issueLabel + } + if err := x.Find(&comments); err != nil { + t.Errorf("Unable to find comments: %v", err) + return + } + for _, comment := range comments { + comPreMigration[comment.ID] = comment } // Run the migration - if err := DeleteOrphanedIssueLabels(x); err != nil { - assert.NoError(t, err) - return + if err := RemoveInvalidLabels(x); err != nil { + t.Errorf("unable to RemoveInvalidLabels: %v", err) } - // Load the 
remaining issue-labels + // Get the post migration values issueLabels = issueLabels[:0] if err := x.Find(&issueLabels); err != nil { - assert.NoError(t, err) + t.Errorf("Unable to find issueLabels: %v", err) return } for _, issueLabel := range issueLabels { - postMigration[issueLabel.ID] = issueLabel + ilPostMigration[issueLabel.ID] = issueLabel } - - // Now test what is left - if _, ok := postMigration[2]; ok { - t.Errorf("Orphaned Label[2] survived the migration") + comments = comments[:0] + if err := x.Find(&comments); err != nil { + t.Errorf("Unable to find comments: %v", err) return } + for _, comment := range comments { + comPostMigration[comment.ID] = comment + } - if _, ok := postMigration[5]; ok { - t.Errorf("Orphaned Label[5] survived the migration") - return + // Finally test results of the migration + for id, comment := range comPreMigration { + post, ok := comPostMigration[id] + if ok { + if !comment.ShouldRemain { + t.Errorf("Comment[%d] remained but should have been deleted", id) + } + assert.Equal(t, comment, post) + } else if comment.ShouldRemain { + t.Errorf("Comment[%d] was deleted but should have remained", id) + } } - for id, post := range postMigration { - pre := preMigration[id] - assert.Equal(t, pre, post, "migration changed issueLabel %d", id) + for id, il := range ilPreMigration { + post, ok := ilPostMigration[id] + if ok { + if !il.ShouldRemain { + t.Errorf("IssueLabel[%d] remained but should have been deleted", id) + } + assert.Equal(t, il, post) + } else if il.ShouldRemain { + t.Errorf("IssueLabel[%d] was deleted but should have remained", id) + } } } diff --git a/models/migrations/v1_14/v178.go b/models/migrations/v1_14/v178.go new file mode 100644 index 0000000000000..e72a9e53a95bd --- /dev/null +++ b/models/migrations/v1_14/v178.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_14 //nolint + +import ( + "fmt" + + "xorm.io/xorm" +) + +// DeleteOrphanedIssueLabels looks through the database for issue_labels where the label no longer exists and deletes them. +func DeleteOrphanedIssueLabels(x *xorm.Engine) error { + type IssueLabel struct { + ID int64 `xorm:"pk autoincr"` + IssueID int64 `xorm:"UNIQUE(s)"` + LabelID int64 `xorm:"UNIQUE(s)"` + } + + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + + if err := sess.Sync2(new(IssueLabel)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + + if _, err := sess.Exec(`DELETE FROM issue_label WHERE issue_label.id IN ( + SELECT ill.id FROM ( + SELECT il.id + FROM issue_label AS il + LEFT JOIN label ON il.label_id = label.id + WHERE + label.id IS NULL + ) AS ill)`); err != nil { + return err + } + + return sess.Commit() +} diff --git a/models/migrations/v1_14/v178_test.go b/models/migrations/v1_14/v178_test.go new file mode 100644 index 0000000000000..5568a18fec0d4 --- /dev/null +++ b/models/migrations/v1_14/v178_test.go @@ -0,0 +1,88 @@ +// Copyright 2021 The Gitea Authors. All rights reserved. 
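// NOTE (not part of the diff): a sketch of the orphan-cleanup pattern used by
// DeleteOrphanedIssueLabels (the new v178.go below). A LEFT JOIN against label finds issue_label
// rows whose label no longer exists, and the wrapping sub-select keeps the DELETE portable across
// the supported databases.
package v1_14 //nolint

import (
	"xorm.io/xorm"
)

func deleteOrphanedIssueLabelsSketch(x *xorm.Engine) error {
	_, err := x.Exec(`DELETE FROM issue_label WHERE issue_label.id IN (
		SELECT ill.id FROM (
			SELECT il.id
			FROM issue_label AS il
			LEFT JOIN label ON il.label_id = label.id
			WHERE label.id IS NULL
		) AS ill)`)
	return err
}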
+// SPDX-License-Identifier: MIT + +package v1_14 //nolint + +import ( + "testing" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/timeutil" + + "github.com/stretchr/testify/assert" +) + +func Test_DeleteOrphanedIssueLabels(t *testing.T) { + // Create the models used in the migration + type IssueLabel struct { + ID int64 `xorm:"pk autoincr"` + IssueID int64 `xorm:"UNIQUE(s)"` + LabelID int64 `xorm:"UNIQUE(s)"` + } + + type Label struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX"` + OrgID int64 `xorm:"INDEX"` + Name string + Description string + Color string `xorm:"VARCHAR(7)"` + NumIssues int + NumClosedIssues int + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + + // Prepare and load the testing database + x, deferable := base.PrepareTestEnv(t, 0, new(IssueLabel), new(Label)) + if x == nil || t.Failed() { + defer deferable() + return + } + defer deferable() + + var issueLabels []*IssueLabel + preMigration := map[int64]*IssueLabel{} + postMigration := map[int64]*IssueLabel{} + + // Load issue labels that exist in the database pre-migration + if err := x.Find(&issueLabels); err != nil { + assert.NoError(t, err) + return + } + for _, issueLabel := range issueLabels { + preMigration[issueLabel.ID] = issueLabel + } + + // Run the migration + if err := DeleteOrphanedIssueLabels(x); err != nil { + assert.NoError(t, err) + return + } + + // Load the remaining issue-labels + issueLabels = issueLabels[:0] + if err := x.Find(&issueLabels); err != nil { + assert.NoError(t, err) + return + } + for _, issueLabel := range issueLabels { + postMigration[issueLabel.ID] = issueLabel + } + + // Now test what is left + if _, ok := postMigration[2]; ok { + t.Errorf("Orphaned Label[2] survived the migration") + return + } + + if _, ok := postMigration[5]; ok { + t.Errorf("Orphaned Label[5] survived the migration") + return + } + + for id, post := range postMigration { + pre := preMigration[id] + assert.Equal(t, pre, post, "migration changed issueLabel %d", id) + } +} diff --git a/models/migrations/v1_15/v178.go b/models/migrations/v1_15/v178.go deleted file mode 100644 index 9bb6ed7f8df76..0000000000000 --- a/models/migrations/v1_15/v178.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_15 //nolint - -import ( - "xorm.io/xorm" -) - -func AddLFSMirrorColumns(x *xorm.Engine) error { - type Mirror struct { - LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"` - LFSEndpoint string `xorm:"lfs_endpoint TEXT"` - } - - return x.Sync2(new(Mirror)) -} diff --git a/models/migrations/v1_15/v179.go b/models/migrations/v1_15/v179.go index f6b142eb42d46..9bb6ed7f8df76 100644 --- a/models/migrations/v1_15/v179.go +++ b/models/migrations/v1_15/v179.go @@ -4,25 +4,14 @@ package v1_15 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" - "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -func ConvertAvatarURLToText(x *xorm.Engine) error { - dbType := x.Dialect().URI().DBType - if dbType == schemas.SQLITE { // For SQLITE, varchar or char will always be represented as TEXT - return nil +func AddLFSMirrorColumns(x *xorm.Engine) error { + type Mirror struct { + LFS bool `xorm:"lfs_enabled NOT NULL DEFAULT false"` + LFSEndpoint string `xorm:"lfs_endpoint TEXT"` } - // Some oauth2 providers may give very long avatar urls (i.e. 
Google) - return base.ModifyColumn(x, "external_login_user", &schemas.Column{ - Name: "avatar_url", - SQLType: schemas.SQLType{ - Name: schemas.Text, - }, - Nullable: true, - DefaultIsEmpty: true, - }) + return x.Sync2(new(Mirror)) } diff --git a/models/migrations/v1_15/v180.go b/models/migrations/v1_15/v180.go index 17163ee2c683f..f6b142eb42d46 100644 --- a/models/migrations/v1_15/v180.go +++ b/models/migrations/v1_15/v180.go @@ -4,118 +4,25 @@ package v1_15 //nolint import ( - "code.gitea.io/gitea/modules/json" - "code.gitea.io/gitea/modules/util" + "code.gitea.io/gitea/models/migrations/base" - "xorm.io/builder" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func DeleteMigrationCredentials(x *xorm.Engine) (err error) { - // Task represents a task - type Task struct { - ID int64 - DoerID int64 `xorm:"index"` // operator - OwnerID int64 `xorm:"index"` // repo owner id, when creating, the repoID maybe zero - RepoID int64 `xorm:"index"` - Type int - Status int `xorm:"index"` - StartTime int64 - EndTime int64 - PayloadContent string `xorm:"TEXT"` - Errors string `xorm:"TEXT"` // if task failed, saved the error reason - Created int64 `xorm:"created"` +func ConvertAvatarURLToText(x *xorm.Engine) error { + dbType := x.Dialect().URI().DBType + if dbType == schemas.SQLITE { // For SQLITE, varchar or char will always be represented as TEXT + return nil } - const TaskTypeMigrateRepo = 0 - const TaskStatusStopped = 2 - - const batchSize = 100 - - // only match migration tasks, that are not pending or running - cond := builder.Eq{ - "type": TaskTypeMigrateRepo, - }.And(builder.Gte{ - "status": TaskStatusStopped, + // Some oauth2 providers may give very long avatar urls (i.e. Google) + return base.ModifyColumn(x, "external_login_user", &schemas.Column{ + Name: "avatar_url", + SQLType: schemas.SQLType{ + Name: schemas.Text, + }, + Nullable: true, + DefaultIsEmpty: true, }) - - sess := x.NewSession() - defer sess.Close() - - for start := 0; ; start += batchSize { - tasks := make([]*Task, 0, batchSize) - if err = sess.Limit(batchSize, start).Where(cond, 0).Find(&tasks); err != nil { - return - } - if len(tasks) == 0 { - break - } - if err = sess.Begin(); err != nil { - return - } - for _, t := range tasks { - if t.PayloadContent, err = removeCredentials(t.PayloadContent); err != nil { - return - } - if _, err = sess.ID(t.ID).Cols("payload_content").Update(t); err != nil { - return - } - } - if err = sess.Commit(); err != nil { - return - } - } - return err -} - -func removeCredentials(payload string) (string, error) { - // MigrateOptions defines the way a repository gets migrated - // this is for internal usage by migrations module and func who interact with it - type MigrateOptions struct { - // required: true - CloneAddr string `json:"clone_addr" binding:"Required"` - CloneAddrEncrypted string `json:"clone_addr_encrypted,omitempty"` - AuthUsername string `json:"auth_username"` - AuthPassword string `json:"-"` - AuthPasswordEncrypted string `json:"auth_password_encrypted,omitempty"` - AuthToken string `json:"-"` - AuthTokenEncrypted string `json:"auth_token_encrypted,omitempty"` - // required: true - UID int `json:"uid" binding:"Required"` - // required: true - RepoName string `json:"repo_name" binding:"Required"` - Mirror bool `json:"mirror"` - LFS bool `json:"lfs"` - LFSEndpoint string `json:"lfs_endpoint"` - Private bool `json:"private"` - Description string `json:"description"` - OriginalURL string - GitServiceType int - Wiki bool - Issues bool - Milestones bool - Labels bool - Releases bool - Comments 
bool - PullRequests bool - ReleaseAssets bool - MigrateToRepoID int64 - MirrorInterval string `json:"mirror_interval"` - } - - var opts MigrateOptions - err := json.Unmarshal([]byte(payload), &opts) - if err != nil { - return "", err - } - - opts.AuthPassword = "" - opts.AuthToken = "" - opts.CloneAddr = util.SanitizeCredentialURLs(opts.CloneAddr) - - confBytes, err := json.Marshal(opts) - if err != nil { - return "", err - } - return string(confBytes), nil } diff --git a/models/migrations/v1_15/v181.go b/models/migrations/v1_15/v181.go index e2bb3208c41d8..17163ee2c683f 100644 --- a/models/migrations/v1_15/v181.go +++ b/models/migrations/v1_15/v181.go @@ -4,89 +4,118 @@ package v1_15 //nolint import ( - "strings" + "code.gitea.io/gitea/modules/json" + "code.gitea.io/gitea/modules/util" + "xorm.io/builder" "xorm.io/xorm" ) -func AddPrimaryEmail2EmailAddress(x *xorm.Engine) (err error) { - type User struct { - ID int64 `xorm:"pk autoincr"` - Email string `xorm:"NOT NULL"` - IsActive bool `xorm:"INDEX"` // Activate primary email +func DeleteMigrationCredentials(x *xorm.Engine) (err error) { + // Task represents a task + type Task struct { + ID int64 + DoerID int64 `xorm:"index"` // operator + OwnerID int64 `xorm:"index"` // repo owner id, when creating, the repoID maybe zero + RepoID int64 `xorm:"index"` + Type int + Status int `xorm:"index"` + StartTime int64 + EndTime int64 + PayloadContent string `xorm:"TEXT"` + Errors string `xorm:"TEXT"` // if task failed, saved the error reason + Created int64 `xorm:"created"` } - type EmailAddress1 struct { - ID int64 `xorm:"pk autoincr"` - UID int64 `xorm:"INDEX NOT NULL"` - Email string `xorm:"UNIQUE NOT NULL"` - LowerEmail string - IsActivated bool - IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"` - } - - // Add lower_email and is_primary columns - if err = x.Table("email_address").Sync2(new(EmailAddress1)); err != nil { - return - } + const TaskTypeMigrateRepo = 0 + const TaskStatusStopped = 2 - if _, err = x.Exec("UPDATE email_address SET lower_email=LOWER(email), is_primary=?", false); err != nil { - return - } - - type EmailAddress struct { - ID int64 `xorm:"pk autoincr"` - UID int64 `xorm:"INDEX NOT NULL"` - Email string `xorm:"UNIQUE NOT NULL"` - LowerEmail string `xorm:"UNIQUE NOT NULL"` - IsActivated bool - IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"` - } + const batchSize = 100 - // change lower_email as unique - if err = x.Sync2(new(EmailAddress)); err != nil { - return - } + // only match migration tasks, that are not pending or running + cond := builder.Eq{ + "type": TaskTypeMigrateRepo, + }.And(builder.Gte{ + "status": TaskStatusStopped, + }) sess := x.NewSession() defer sess.Close() - const batchSize = 100 - for start := 0; ; start += batchSize { - users := make([]*User, 0, batchSize) - if err = sess.Limit(batchSize, start).Find(&users); err != nil { + tasks := make([]*Task, 0, batchSize) + if err = sess.Limit(batchSize, start).Where(cond, 0).Find(&tasks); err != nil { return } - if len(users) == 0 { + if len(tasks) == 0 { break } - - for _, user := range users { - var exist bool - exist, err = sess.Where("email=?", user.Email).Table("email_address").Exist() - if err != nil { + if err = sess.Begin(); err != nil { + return + } + for _, t := range tasks { + if t.PayloadContent, err = removeCredentials(t.PayloadContent); err != nil { return } - if !exist { - if _, err = sess.Insert(&EmailAddress{ - UID: user.ID, - Email: user.Email, - LowerEmail: strings.ToLower(user.Email), - IsActivated: user.IsActive, - IsPrimary: true, - }); err 
!= nil { - return - } - } else { - if _, err = sess.Where("email=?", user.Email).Cols("is_primary").Update(&EmailAddress{ - IsPrimary: true, - }); err != nil { - return - } + if _, err = sess.ID(t.ID).Cols("payload_content").Update(t); err != nil { + return } } + if err = sess.Commit(); err != nil { + return + } } + return err +} - return nil +func removeCredentials(payload string) (string, error) { + // MigrateOptions defines the way a repository gets migrated + // this is for internal usage by migrations module and func who interact with it + type MigrateOptions struct { + // required: true + CloneAddr string `json:"clone_addr" binding:"Required"` + CloneAddrEncrypted string `json:"clone_addr_encrypted,omitempty"` + AuthUsername string `json:"auth_username"` + AuthPassword string `json:"-"` + AuthPasswordEncrypted string `json:"auth_password_encrypted,omitempty"` + AuthToken string `json:"-"` + AuthTokenEncrypted string `json:"auth_token_encrypted,omitempty"` + // required: true + UID int `json:"uid" binding:"Required"` + // required: true + RepoName string `json:"repo_name" binding:"Required"` + Mirror bool `json:"mirror"` + LFS bool `json:"lfs"` + LFSEndpoint string `json:"lfs_endpoint"` + Private bool `json:"private"` + Description string `json:"description"` + OriginalURL string + GitServiceType int + Wiki bool + Issues bool + Milestones bool + Labels bool + Releases bool + Comments bool + PullRequests bool + ReleaseAssets bool + MigrateToRepoID int64 + MirrorInterval string `json:"mirror_interval"` + } + + var opts MigrateOptions + err := json.Unmarshal([]byte(payload), &opts) + if err != nil { + return "", err + } + + opts.AuthPassword = "" + opts.AuthToken = "" + opts.CloneAddr = util.SanitizeCredentialURLs(opts.CloneAddr) + + confBytes, err := json.Marshal(opts) + if err != nil { + return "", err + } + return string(confBytes), nil } diff --git a/models/migrations/v1_15/v181_test.go b/models/migrations/v1_15/v181_test.go deleted file mode 100644 index 1b075be7a0949..0000000000000 --- a/models/migrations/v1_15/v181_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. 
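// NOTE (not part of the diff): a hedged sketch of the payload scrubbing done by
// DeleteMigrationCredentials / removeCredentials above. The task payload is decoded into a struct
// that only declares the fields which should survive, the clone URL is stripped of embedded
// credentials, and the payload is re-encoded; legacy plaintext credential keys are dropped simply
// because the round-trip does not know about them. The struct here is heavily trimmed for
// illustration and is not the migration's MigrateOptions.
package v1_15 //nolint

import (
	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/util"
)

func scrubMigrationPayloadSketch(payload string) (string, error) {
	type migrateOptions struct { // trimmed-down copy, illustrative only
		CloneAddr    string `json:"clone_addr"`
		AuthUsername string `json:"auth_username"`
		UID          int    `json:"uid"`
		RepoName     string `json:"repo_name"`
		Mirror       bool   `json:"mirror"`
	}

	var opts migrateOptions
	if err := json.Unmarshal([]byte(payload), &opts); err != nil {
		return "", err
	}

	// Mask user:password@ credentials that may be embedded in the clone URL.
	opts.CloneAddr = util.SanitizeCredentialURLs(opts.CloneAddr)

	out, err := json.Marshal(opts)
	if err != nil {
		return "", err
	}
	return string(out), nil
}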
-// SPDX-License-Identifier: MIT - -package v1_15 //nolint - -import ( - "strings" - "testing" - - "code.gitea.io/gitea/models/migrations/base" - - "github.com/stretchr/testify/assert" -) - -func Test_AddPrimaryEmail2EmailAddress(t *testing.T) { - type User struct { - ID int64 - Email string - IsActive bool - } - - // Prepare and load the testing database - x, deferable := base.PrepareTestEnv(t, 0, new(User)) - if x == nil || t.Failed() { - defer deferable() - return - } - defer deferable() - - err := AddPrimaryEmail2EmailAddress(x) - assert.NoError(t, err) - - type EmailAddress struct { - ID int64 `xorm:"pk autoincr"` - UID int64 `xorm:"INDEX NOT NULL"` - Email string `xorm:"UNIQUE NOT NULL"` - LowerEmail string `xorm:"UNIQUE NOT NULL"` - IsActivated bool - IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"` - } - - users := make([]User, 0, 20) - err = x.Find(&users) - assert.NoError(t, err) - - for _, user := range users { - var emailAddress EmailAddress - has, err := x.Where("lower_email=?", strings.ToLower(user.Email)).Get(&emailAddress) - assert.NoError(t, err) - assert.True(t, has) - assert.True(t, emailAddress.IsPrimary) - assert.EqualValues(t, user.IsActive, emailAddress.IsActivated) - assert.EqualValues(t, user.ID, emailAddress.UID) - } -} diff --git a/models/migrations/v1_15/v182.go b/models/migrations/v1_15/v182.go index c584ef851df37..e2bb3208c41d8 100644 --- a/models/migrations/v1_15/v182.go +++ b/models/migrations/v1_15/v182.go @@ -4,38 +4,89 @@ package v1_15 //nolint import ( + "strings" + "xorm.io/xorm" ) -func AddIssueResourceIndexTable(x *xorm.Engine) error { - type ResourceIndex struct { - GroupID int64 `xorm:"pk"` - MaxIndex int64 `xorm:"index"` +func AddPrimaryEmail2EmailAddress(x *xorm.Engine) (err error) { + type User struct { + ID int64 `xorm:"pk autoincr"` + Email string `xorm:"NOT NULL"` + IsActive bool `xorm:"INDEX"` // Activate primary email } - sess := x.NewSession() - defer sess.Close() + type EmailAddress1 struct { + ID int64 `xorm:"pk autoincr"` + UID int64 `xorm:"INDEX NOT NULL"` + Email string `xorm:"UNIQUE NOT NULL"` + LowerEmail string + IsActivated bool + IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"` + } + + // Add lower_email and is_primary columns + if err = x.Table("email_address").Sync2(new(EmailAddress1)); err != nil { + return + } - if err := sess.Begin(); err != nil { - return err + if _, err = x.Exec("UPDATE email_address SET lower_email=LOWER(email), is_primary=?", false); err != nil { + return } - if err := sess.Table("issue_index").Sync2(new(ResourceIndex)); err != nil { - return err + type EmailAddress struct { + ID int64 `xorm:"pk autoincr"` + UID int64 `xorm:"INDEX NOT NULL"` + Email string `xorm:"UNIQUE NOT NULL"` + LowerEmail string `xorm:"UNIQUE NOT NULL"` + IsActivated bool + IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"` } - // Remove data we're goint to rebuild - if _, err := sess.Table("issue_index").Where("1=1").Delete(&ResourceIndex{}); err != nil { - return err + // change lower_email as unique + if err = x.Sync2(new(EmailAddress)); err != nil { + return } - // Create current data for all repositories with issues and PRs - if _, err := sess.Exec("INSERT INTO issue_index (group_id, max_index) " + - "SELECT max_data.repo_id, max_data.max_index " + - "FROM ( SELECT issue.repo_id AS repo_id, max(issue.`index`) AS max_index " + - "FROM issue GROUP BY issue.repo_id) AS max_data"); err != nil { - return err + sess := x.NewSession() + defer sess.Close() + + const batchSize = 100 + + for start := 0; ; start += batchSize { + users := 
make([]*User, 0, batchSize) + if err = sess.Limit(batchSize, start).Find(&users); err != nil { + return + } + if len(users) == 0 { + break + } + + for _, user := range users { + var exist bool + exist, err = sess.Where("email=?", user.Email).Table("email_address").Exist() + if err != nil { + return + } + if !exist { + if _, err = sess.Insert(&EmailAddress{ + UID: user.ID, + Email: user.Email, + LowerEmail: strings.ToLower(user.Email), + IsActivated: user.IsActive, + IsPrimary: true, + }); err != nil { + return + } + } else { + if _, err = sess.Where("email=?", user.Email).Cols("is_primary").Update(&EmailAddress{ + IsPrimary: true, + }); err != nil { + return + } + } + } } - return sess.Commit() + return nil } diff --git a/models/migrations/v1_15/v182_test.go b/models/migrations/v1_15/v182_test.go index 75ef8e1cd83f3..1b075be7a0949 100644 --- a/models/migrations/v1_15/v182_test.go +++ b/models/migrations/v1_15/v182_test.go @@ -4,6 +4,7 @@ package v1_15 //nolint import ( + "strings" "testing" "code.gitea.io/gitea/models/migrations/base" @@ -11,50 +12,44 @@ import ( "github.com/stretchr/testify/assert" ) -func Test_AddIssueResourceIndexTable(t *testing.T) { - // Create the models used in the migration - type Issue struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"UNIQUE(s)"` - Index int64 `xorm:"UNIQUE(s)"` +func Test_AddPrimaryEmail2EmailAddress(t *testing.T) { + type User struct { + ID int64 + Email string + IsActive bool } // Prepare and load the testing database - x, deferable := base.PrepareTestEnv(t, 0, new(Issue)) + x, deferable := base.PrepareTestEnv(t, 0, new(User)) if x == nil || t.Failed() { defer deferable() return } defer deferable() - // Run the migration - if err := AddIssueResourceIndexTable(x); err != nil { - assert.NoError(t, err) - return - } + err := AddPrimaryEmail2EmailAddress(x) + assert.NoError(t, err) - type ResourceIndex struct { - GroupID int64 `xorm:"pk"` - MaxIndex int64 `xorm:"index"` + type EmailAddress struct { + ID int64 `xorm:"pk autoincr"` + UID int64 `xorm:"INDEX NOT NULL"` + Email string `xorm:"UNIQUE NOT NULL"` + LowerEmail string `xorm:"UNIQUE NOT NULL"` + IsActivated bool + IsPrimary bool `xorm:"DEFAULT(false) NOT NULL"` } - start := 0 - const batchSize = 1000 - for { - indexes := make([]ResourceIndex, 0, batchSize) - err := x.Table("issue_index").Limit(batchSize, start).Find(&indexes) - assert.NoError(t, err) + users := make([]User, 0, 20) + err = x.Find(&users) + assert.NoError(t, err) - for _, idx := range indexes { - var maxIndex int - has, err := x.SQL("SELECT max(`index`) FROM issue WHERE repo_id = ?", idx.GroupID).Get(&maxIndex) - assert.NoError(t, err) - assert.True(t, has) - assert.EqualValues(t, maxIndex, idx.MaxIndex) - } - if len(indexes) < batchSize { - break - } - start += len(indexes) + for _, user := range users { + var emailAddress EmailAddress + has, err := x.Where("lower_email=?", strings.ToLower(user.Email)).Get(&emailAddress) + assert.NoError(t, err) + assert.True(t, has) + assert.True(t, emailAddress.IsPrimary) + assert.EqualValues(t, user.IsActive, emailAddress.IsActivated) + assert.EqualValues(t, user.ID, emailAddress.UID) } } diff --git a/models/migrations/v1_15/v183.go b/models/migrations/v1_15/v183.go index 4cc98f9efcb36..c584ef851df37 100644 --- a/models/migrations/v1_15/v183.go +++ b/models/migrations/v1_15/v183.go @@ -4,34 +4,37 @@ package v1_15 //nolint import ( - "fmt" - "time" - - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func CreatePushMirrorTable(x *xorm.Engine) error { - type PushMirror 
struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX"` - RemoteName string - - Interval time.Duration - CreatedUnix timeutil.TimeStamp `xorm:"created"` - LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"` - LastError string `xorm:"text"` +func AddIssueResourceIndexTable(x *xorm.Engine) error { + type ResourceIndex struct { + GroupID int64 `xorm:"pk"` + MaxIndex int64 `xorm:"index"` } sess := x.NewSession() defer sess.Close() + if err := sess.Begin(); err != nil { return err } - if err := sess.Sync2(new(PushMirror)); err != nil { - return fmt.Errorf("Sync2: %w", err) + if err := sess.Table("issue_index").Sync2(new(ResourceIndex)); err != nil { + return err + } + + // Remove data we're goint to rebuild + if _, err := sess.Table("issue_index").Where("1=1").Delete(&ResourceIndex{}); err != nil { + return err + } + + // Create current data for all repositories with issues and PRs + if _, err := sess.Exec("INSERT INTO issue_index (group_id, max_index) " + + "SELECT max_data.repo_id, max_data.max_index " + + "FROM ( SELECT issue.repo_id AS repo_id, max(issue.`index`) AS max_index " + + "FROM issue GROUP BY issue.repo_id) AS max_data"); err != nil { + return err } return sess.Commit() diff --git a/models/migrations/v1_15/v183_test.go b/models/migrations/v1_15/v183_test.go new file mode 100644 index 0000000000000..75ef8e1cd83f3 --- /dev/null +++ b/models/migrations/v1_15/v183_test.go @@ -0,0 +1,60 @@ +// Copyright 2021 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_15 //nolint + +import ( + "testing" + + "code.gitea.io/gitea/models/migrations/base" + + "github.com/stretchr/testify/assert" +) + +func Test_AddIssueResourceIndexTable(t *testing.T) { + // Create the models used in the migration + type Issue struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"UNIQUE(s)"` + Index int64 `xorm:"UNIQUE(s)"` + } + + // Prepare and load the testing database + x, deferable := base.PrepareTestEnv(t, 0, new(Issue)) + if x == nil || t.Failed() { + defer deferable() + return + } + defer deferable() + + // Run the migration + if err := AddIssueResourceIndexTable(x); err != nil { + assert.NoError(t, err) + return + } + + type ResourceIndex struct { + GroupID int64 `xorm:"pk"` + MaxIndex int64 `xorm:"index"` + } + + start := 0 + const batchSize = 1000 + for { + indexes := make([]ResourceIndex, 0, batchSize) + err := x.Table("issue_index").Limit(batchSize, start).Find(&indexes) + assert.NoError(t, err) + + for _, idx := range indexes { + var maxIndex int + has, err := x.SQL("SELECT max(`index`) FROM issue WHERE repo_id = ?", idx.GroupID).Get(&maxIndex) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, maxIndex, idx.MaxIndex) + } + if len(indexes) < batchSize { + break + } + start += len(indexes) + } +} diff --git a/models/migrations/v1_15/v184.go b/models/migrations/v1_15/v184.go index caf41b6048ed4..4cc98f9efcb36 100644 --- a/models/migrations/v1_15/v184.go +++ b/models/migrations/v1_15/v184.go @@ -4,36 +4,24 @@ package v1_15 //nolint import ( - "context" "fmt" + "time" - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func RenameTaskErrorsToMessage(x *xorm.Engine) error { - type Task struct { - Errors string `xorm:"TEXT"` // if task failed, saved the error reason - Type int - Status int `xorm:"index"` - } - - // This migration maybe rerun so that we should check if it has been run - messageExist, err := 
x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "message") - if err != nil { - return err - } +func CreatePushMirrorTable(x *xorm.Engine) error { + type PushMirror struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX"` + RemoteName string - if messageExist { - errorsExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "errors") - if err != nil { - return err - } - if !errorsExist { - return nil - } + Interval time.Duration + CreatedUnix timeutil.TimeStamp `xorm:"created"` + LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"` + LastError string `xorm:"text"` } sess := x.NewSession() @@ -42,30 +30,9 @@ func RenameTaskErrorsToMessage(x *xorm.Engine) error { return err } - if err := sess.Sync2(new(Task)); err != nil { - return fmt.Errorf("error on Sync2: %w", err) + if err := sess.Sync2(new(PushMirror)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - if messageExist { - // if both errors and message exist, drop message at first - if err := base.DropTableColumns(sess, "task", "message"); err != nil { - return err - } - } - - switch { - case setting.Database.Type.IsMySQL(): - if _, err := sess.Exec("ALTER TABLE `task` CHANGE errors message text"); err != nil { - return err - } - case setting.Database.Type.IsMSSQL(): - if _, err := sess.Exec("sp_rename 'task.errors', 'message', 'COLUMN'"); err != nil { - return err - } - default: - if _, err := sess.Exec("ALTER TABLE `task` RENAME COLUMN errors TO message"); err != nil { - return err - } - } return sess.Commit() } diff --git a/models/migrations/v1_15/v185.go b/models/migrations/v1_15/v185.go index 382cb621477fd..caf41b6048ed4 100644 --- a/models/migrations/v1_15/v185.go +++ b/models/migrations/v1_15/v185.go @@ -4,18 +4,68 @@ package v1_15 //nolint import ( + "context" + "fmt" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func AddRepoArchiver(x *xorm.Engine) error { - // RepoArchiver represents all archivers - type RepoArchiver struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"index unique(s)"` - Type int `xorm:"unique(s)"` - Status int - CommitID string `xorm:"VARCHAR(40) unique(s)"` - CreatedUnix int64 `xorm:"INDEX NOT NULL created"` +func RenameTaskErrorsToMessage(x *xorm.Engine) error { + type Task struct { + Errors string `xorm:"TEXT"` // if task failed, saved the error reason + Type int + Status int `xorm:"index"` + } + + // This migration maybe rerun so that we should check if it has been run + messageExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "message") + if err != nil { + return err + } + + if messageExist { + errorsExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "errors") + if err != nil { + return err + } + if !errorsExist { + return nil + } + } + + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + + if err := sess.Sync2(new(Task)); err != nil { + return fmt.Errorf("error on Sync2: %w", err) + } + + if messageExist { + // if both errors and message exist, drop message at first + if err := base.DropTableColumns(sess, "task", "message"); err != nil { + return err + } + } + + switch { + case setting.Database.Type.IsMySQL(): + if _, err := sess.Exec("ALTER TABLE `task` CHANGE errors message text"); err != nil { + return err + } + case setting.Database.Type.IsMSSQL(): + if _, err := sess.Exec("sp_rename 'task.errors', 'message', 'COLUMN'"); err != nil { + return err + } + 
default: + if _, err := sess.Exec("ALTER TABLE `task` RENAME COLUMN errors TO message"); err != nil { + return err + } } - return x.Sync2(new(RepoArchiver)) + return sess.Commit() } diff --git a/models/migrations/v1_15/v186.go b/models/migrations/v1_15/v186.go index 310ac85f4cf9d..382cb621477fd 100644 --- a/models/migrations/v1_15/v186.go +++ b/models/migrations/v1_15/v186.go @@ -4,22 +4,18 @@ package v1_15 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func CreateProtectedTagTable(x *xorm.Engine) error { - type ProtectedTag struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 - NamePattern string - AllowlistUserIDs []int64 `xorm:"JSON TEXT"` - AllowlistTeamIDs []int64 `xorm:"JSON TEXT"` - - CreatedUnix timeutil.TimeStamp `xorm:"created"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated"` +func AddRepoArchiver(x *xorm.Engine) error { + // RepoArchiver represents all archivers + type RepoArchiver struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"index unique(s)"` + Type int `xorm:"unique(s)"` + Status int + CommitID string `xorm:"VARCHAR(40) unique(s)"` + CreatedUnix int64 `xorm:"INDEX NOT NULL created"` } - - return x.Sync2(new(ProtectedTag)) + return x.Sync2(new(RepoArchiver)) } diff --git a/models/migrations/v1_15/v187.go b/models/migrations/v1_15/v187.go index afd86bac45e37..310ac85f4cf9d 100644 --- a/models/migrations/v1_15/v187.go +++ b/models/migrations/v1_15/v187.go @@ -4,44 +4,22 @@ package v1_15 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func DropWebhookColumns(x *xorm.Engine) error { - // Make sure the columns exist before dropping them - type Webhook struct { - Signature string `xorm:"TEXT"` - IsSSL bool `xorm:"is_ssl"` - } - if err := x.Sync2(new(Webhook)); err != nil { - return err - } +func CreateProtectedTagTable(x *xorm.Engine) error { + type ProtectedTag struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 + NamePattern string + AllowlistUserIDs []int64 `xorm:"JSON TEXT"` + AllowlistTeamIDs []int64 `xorm:"JSON TEXT"` - type HookTask struct { - Typ string `xorm:"VARCHAR(16) index"` - URL string `xorm:"TEXT"` - Signature string `xorm:"TEXT"` - HTTPMethod string `xorm:"http_method"` - ContentType int - IsSSL bool - } - if err := x.Sync2(new(HookTask)); err != nil { - return err - } - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - if err := base.DropTableColumns(sess, "webhook", "signature", "is_ssl"); err != nil { - return err - } - if err := base.DropTableColumns(sess, "hook_task", "typ", "url", "signature", "http_method", "content_type", "is_ssl"); err != nil { - return err + CreatedUnix timeutil.TimeStamp `xorm:"created"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated"` } - return sess.Commit() + return x.Sync2(new(ProtectedTag)) } diff --git a/models/migrations/v1_15/v188.go b/models/migrations/v1_15/v188.go index 71e45cab0e317..afd86bac45e37 100644 --- a/models/migrations/v1_15/v188.go +++ b/models/migrations/v1_15/v188.go @@ -3,12 +3,45 @@ package v1_15 //nolint -import "xorm.io/xorm" +import ( + "code.gitea.io/gitea/models/migrations/base" -func AddKeyIsVerified(x *xorm.Engine) error { - type GPGKey struct { - Verified bool `xorm:"NOT NULL DEFAULT false"` + "xorm.io/xorm" +) + +func DropWebhookColumns(x *xorm.Engine) error { + // Make sure the columns exist before dropping them + type Webhook struct { + Signature string `xorm:"TEXT"` + IsSSL bool `xorm:"is_ssl"` + } + if err := 
x.Sync2(new(Webhook)); err != nil { + return err + } + + type HookTask struct { + Typ string `xorm:"VARCHAR(16) index"` + URL string `xorm:"TEXT"` + Signature string `xorm:"TEXT"` + HTTPMethod string `xorm:"http_method"` + ContentType int + IsSSL bool + } + if err := x.Sync2(new(HookTask)); err != nil { + return err + } + + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + if err := base.DropTableColumns(sess, "webhook", "signature", "is_ssl"); err != nil { + return err + } + if err := base.DropTableColumns(sess, "hook_task", "typ", "url", "signature", "http_method", "content_type", "is_ssl"); err != nil { + return err } - return x.Sync(new(GPGKey)) + return sess.Commit() } diff --git a/models/migrations/v1_15/v189.go b/models/migrations/v1_15/v189.go new file mode 100644 index 0000000000000..71e45cab0e317 --- /dev/null +++ b/models/migrations/v1_15/v189.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_15 //nolint + +import "xorm.io/xorm" + +func AddKeyIsVerified(x *xorm.Engine) error { + type GPGKey struct { + Verified bool `xorm:"NOT NULL DEFAULT false"` + } + + return x.Sync(new(GPGKey)) +} diff --git a/models/migrations/v1_16/v189.go b/models/migrations/v1_16/v189.go deleted file mode 100644 index 79e3289ba7f51..0000000000000 --- a/models/migrations/v1_16/v189.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_16 //nolint - -import ( - "encoding/binary" - "fmt" - - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/json" - - "xorm.io/xorm" -) - -func UnwrapLDAPSourceCfg(x *xorm.Engine) error { - jsonUnmarshalHandleDoubleEncode := func(bs []byte, v any) error { - err := json.Unmarshal(bs, v) - if err != nil { - ok := true - rs := []byte{} - temp := make([]byte, 2) - for _, rn := range string(bs) { - if rn > 0xffff { - ok = false - break - } - binary.LittleEndian.PutUint16(temp, uint16(rn)) - rs = append(rs, temp...) - } - if ok { - if rs[0] == 0xff && rs[1] == 0xfe { - rs = rs[2:] - } - err = json.Unmarshal(rs, v) - } - } - if err != nil && len(bs) > 2 && bs[0] == 0xff && bs[1] == 0xfe { - err = json.Unmarshal(bs[2:], v) - } - return err - } - - // LoginSource represents an external way for authorizing users. - type LoginSource struct { - ID int64 `xorm:"pk autoincr"` - Type int - IsActived bool `xorm:"INDEX NOT NULL DEFAULT false"` - IsActive bool `xorm:"INDEX NOT NULL DEFAULT false"` - Cfg string `xorm:"TEXT"` - } - - const ldapType = 2 - const dldapType = 5 - - type WrappedSource struct { - Source map[string]any - } - - // change lower_email as unique - if err := x.Sync2(new(LoginSource)); err != nil { - return err - } - - sess := x.NewSession() - defer sess.Close() - - const batchSize = 100 - for start := 0; ; start += batchSize { - sources := make([]*LoginSource, 0, batchSize) - if err := sess.Limit(batchSize, start).Where("`type` = ? 
OR `type` = ?", ldapType, dldapType).Find(&sources); err != nil { - return err - } - if len(sources) == 0 { - break - } - - for _, source := range sources { - wrapped := &WrappedSource{ - Source: map[string]any{}, - } - err := jsonUnmarshalHandleDoubleEncode([]byte(source.Cfg), &wrapped) - if err != nil { - return fmt.Errorf("failed to unmarshal %s: %w", source.Cfg, err) - } - if wrapped.Source != nil && len(wrapped.Source) > 0 { - bs, err := json.Marshal(wrapped.Source) - if err != nil { - return err - } - source.Cfg = string(bs) - if _, err := sess.ID(source.ID).Cols("cfg").Update(source); err != nil { - return err - } - } - } - } - - if _, err := x.SetExpr("is_active", "is_actived").Update(&LoginSource{}); err != nil { - return fmt.Errorf("SetExpr Update failed: %w", err) - } - - if err := sess.Begin(); err != nil { - return err - } - if err := base.DropTableColumns(sess, "login_source", "is_actived"); err != nil { - return err - } - - return sess.Commit() -} diff --git a/models/migrations/v1_16/v190.go b/models/migrations/v1_16/v190.go index 1853729ae99dc..79e3289ba7f51 100644 --- a/models/migrations/v1_16/v190.go +++ b/models/migrations/v1_16/v190.go @@ -4,20 +4,108 @@ package v1_16 //nolint import ( + "encoding/binary" "fmt" + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/json" + "xorm.io/xorm" ) -func AddAgitFlowPullRequest(x *xorm.Engine) error { - type PullRequestFlow int +func UnwrapLDAPSourceCfg(x *xorm.Engine) error { + jsonUnmarshalHandleDoubleEncode := func(bs []byte, v any) error { + err := json.Unmarshal(bs, v) + if err != nil { + ok := true + rs := []byte{} + temp := make([]byte, 2) + for _, rn := range string(bs) { + if rn > 0xffff { + ok = false + break + } + binary.LittleEndian.PutUint16(temp, uint16(rn)) + rs = append(rs, temp...) + } + if ok { + if rs[0] == 0xff && rs[1] == 0xfe { + rs = rs[2:] + } + err = json.Unmarshal(rs, v) + } + } + if err != nil && len(bs) > 2 && bs[0] == 0xff && bs[1] == 0xfe { + err = json.Unmarshal(bs[2:], v) + } + return err + } + + // LoginSource represents an external way for authorizing users. + type LoginSource struct { + ID int64 `xorm:"pk autoincr"` + Type int + IsActived bool `xorm:"INDEX NOT NULL DEFAULT false"` + IsActive bool `xorm:"INDEX NOT NULL DEFAULT false"` + Cfg string `xorm:"TEXT"` + } + + const ldapType = 2 + const dldapType = 5 - type PullRequest struct { - Flow PullRequestFlow `xorm:"NOT NULL DEFAULT 0"` + type WrappedSource struct { + Source map[string]any } - if err := x.Sync2(new(PullRequest)); err != nil { - return fmt.Errorf("sync2: %w", err) + // change lower_email as unique + if err := x.Sync2(new(LoginSource)); err != nil { + return err } - return nil + + sess := x.NewSession() + defer sess.Close() + + const batchSize = 100 + for start := 0; ; start += batchSize { + sources := make([]*LoginSource, 0, batchSize) + if err := sess.Limit(batchSize, start).Where("`type` = ? 
OR `type` = ?", ldapType, dldapType).Find(&sources); err != nil { + return err + } + if len(sources) == 0 { + break + } + + for _, source := range sources { + wrapped := &WrappedSource{ + Source: map[string]any{}, + } + err := jsonUnmarshalHandleDoubleEncode([]byte(source.Cfg), &wrapped) + if err != nil { + return fmt.Errorf("failed to unmarshal %s: %w", source.Cfg, err) + } + if wrapped.Source != nil && len(wrapped.Source) > 0 { + bs, err := json.Marshal(wrapped.Source) + if err != nil { + return err + } + source.Cfg = string(bs) + if _, err := sess.ID(source.ID).Cols("cfg").Update(source); err != nil { + return err + } + } + } + } + + if _, err := x.SetExpr("is_active", "is_actived").Update(&LoginSource{}); err != nil { + return fmt.Errorf("SetExpr Update failed: %w", err) + } + + if err := sess.Begin(); err != nil { + return err + } + if err := base.DropTableColumns(sess, "login_source", "is_actived"); err != nil { + return err + } + + return sess.Commit() } diff --git a/models/migrations/v1_16/v189_test.go b/models/migrations/v1_16/v190_test.go similarity index 100% rename from models/migrations/v1_16/v189_test.go rename to models/migrations/v1_16/v190_test.go diff --git a/models/migrations/v1_16/v191.go b/models/migrations/v1_16/v191.go index c618783c08e86..1853729ae99dc 100644 --- a/models/migrations/v1_16/v191.go +++ b/models/migrations/v1_16/v191.go @@ -4,25 +4,20 @@ package v1_16 //nolint import ( - "code.gitea.io/gitea/modules/setting" + "fmt" "xorm.io/xorm" ) -func AlterIssueAndCommentTextFieldsToLongText(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err +func AddAgitFlowPullRequest(x *xorm.Engine) error { + type PullRequestFlow int + + type PullRequest struct { + Flow PullRequestFlow `xorm:"NOT NULL DEFAULT 0"` } - if setting.Database.Type.IsMySQL() { - if _, err := sess.Exec("ALTER TABLE `issue` CHANGE `content` `content` LONGTEXT"); err != nil { - return err - } - if _, err := sess.Exec("ALTER TABLE `comment` CHANGE `content` `content` LONGTEXT, CHANGE `patch` `patch` LONGTEXT"); err != nil { - return err - } + if err := x.Sync2(new(PullRequest)); err != nil { + return fmt.Errorf("sync2: %w", err) } - return sess.Commit() + return nil } diff --git a/models/migrations/v1_16/v192.go b/models/migrations/v1_16/v192.go index 2d5d158a09e0d..c618783c08e86 100644 --- a/models/migrations/v1_16/v192.go +++ b/models/migrations/v1_16/v192.go @@ -4,16 +4,25 @@ package v1_16 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func RecreateIssueResourceIndexTable(x *xorm.Engine) error { - type IssueIndex struct { - GroupID int64 `xorm:"pk"` - MaxIndex int64 `xorm:"index"` +func AlterIssueAndCommentTextFieldsToLongText(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err } - return base.RecreateTables(new(IssueIndex))(x) + if setting.Database.Type.IsMySQL() { + if _, err := sess.Exec("ALTER TABLE `issue` CHANGE `content` `content` LONGTEXT"); err != nil { + return err + } + if _, err := sess.Exec("ALTER TABLE `comment` CHANGE `content` `content` LONGTEXT, CHANGE `patch` `patch` LONGTEXT"); err != nil { + return err + } + } + return sess.Commit() } diff --git a/models/migrations/v1_16/v193.go b/models/migrations/v1_16/v193.go index dd50e353e3713..2d5d158a09e0d 100644 --- a/models/migrations/v1_16/v193.go +++ b/models/migrations/v1_16/v193.go @@ -4,29 +4,16 @@ package v1_16 //nolint 
import ( + "code.gitea.io/gitea/models/migrations/base" + "xorm.io/xorm" ) -func AddRepoIDForAttachment(x *xorm.Engine) error { - type Attachment struct { - ID int64 `xorm:"pk autoincr"` - UUID string `xorm:"uuid UNIQUE"` - RepoID int64 `xorm:"INDEX"` // this should not be zero - IssueID int64 `xorm:"INDEX"` // maybe zero when creating - ReleaseID int64 `xorm:"INDEX"` // maybe zero when creating - UploaderID int64 `xorm:"INDEX DEFAULT 0"` - } - if err := x.Sync2(new(Attachment)); err != nil { - return err - } - - if _, err := x.Exec("UPDATE `attachment` set repo_id = (SELECT repo_id FROM `issue` WHERE `issue`.id = `attachment`.issue_id) WHERE `attachment`.issue_id > 0"); err != nil { - return err - } - - if _, err := x.Exec("UPDATE `attachment` set repo_id = (SELECT repo_id FROM `release` WHERE `release`.id = `attachment`.release_id) WHERE `attachment`.release_id > 0"); err != nil { - return err +func RecreateIssueResourceIndexTable(x *xorm.Engine) error { + type IssueIndex struct { + GroupID int64 `xorm:"pk"` + MaxIndex int64 `xorm:"index"` } - return nil + return base.RecreateTables(new(IssueIndex))(x) } diff --git a/models/migrations/v1_16/v194.go b/models/migrations/v1_16/v194.go index ae7fe10bfc56f..dd50e353e3713 100644 --- a/models/migrations/v1_16/v194.go +++ b/models/migrations/v1_16/v194.go @@ -4,18 +4,29 @@ package v1_16 //nolint import ( - "fmt" - "xorm.io/xorm" ) -func AddBranchProtectionUnprotectedFilesColumn(x *xorm.Engine) error { - type ProtectedBranch struct { - UnprotectedFilePatterns string `xorm:"TEXT"` +func AddRepoIDForAttachment(x *xorm.Engine) error { + type Attachment struct { + ID int64 `xorm:"pk autoincr"` + UUID string `xorm:"uuid UNIQUE"` + RepoID int64 `xorm:"INDEX"` // this should not be zero + IssueID int64 `xorm:"INDEX"` // maybe zero when creating + ReleaseID int64 `xorm:"INDEX"` // maybe zero when creating + UploaderID int64 `xorm:"INDEX DEFAULT 0"` + } + if err := x.Sync2(new(Attachment)); err != nil { + return err } - if err := x.Sync2(new(ProtectedBranch)); err != nil { - return fmt.Errorf("Sync2: %w", err) + if _, err := x.Exec("UPDATE `attachment` set repo_id = (SELECT repo_id FROM `issue` WHERE `issue`.id = `attachment`.issue_id) WHERE `attachment`.issue_id > 0"); err != nil { + return err } + + if _, err := x.Exec("UPDATE `attachment` set repo_id = (SELECT repo_id FROM `release` WHERE `release`.id = `attachment`.release_id) WHERE `attachment`.release_id > 0"); err != nil { + return err + } + return nil } diff --git a/models/migrations/v1_16/v193_test.go b/models/migrations/v1_16/v194_test.go similarity index 100% rename from models/migrations/v1_16/v193_test.go rename to models/migrations/v1_16/v194_test.go diff --git a/models/migrations/v1_16/v195.go b/models/migrations/v1_16/v195.go index 9e390a971e471..ae7fe10bfc56f 100644 --- a/models/migrations/v1_16/v195.go +++ b/models/migrations/v1_16/v195.go @@ -9,38 +9,13 @@ import ( "xorm.io/xorm" ) -func AddTableCommitStatusIndex(x *xorm.Engine) error { - // CommitStatusIndex represents a table for commit status index - type CommitStatusIndex struct { - ID int64 - RepoID int64 `xorm:"unique(repo_sha)"` - SHA string `xorm:"unique(repo_sha)"` - MaxIndex int64 `xorm:"index"` +func AddBranchProtectionUnprotectedFilesColumn(x *xorm.Engine) error { + type ProtectedBranch struct { + UnprotectedFilePatterns string `xorm:"TEXT"` } - if err := x.Sync2(new(CommitStatusIndex)); err != nil { + if err := x.Sync2(new(ProtectedBranch)); err != nil { return fmt.Errorf("Sync2: %w", err) } - - sess := x.NewSession() - 
defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - // Remove data we're goint to rebuild - if _, err := sess.Table("commit_status_index").Where("1=1").Delete(&CommitStatusIndex{}); err != nil { - return err - } - - // Create current data for all repositories with issues and PRs - if _, err := sess.Exec("INSERT INTO commit_status_index (repo_id, sha, max_index) " + - "SELECT max_data.repo_id, max_data.sha, max_data.max_index " + - "FROM ( SELECT commit_status.repo_id AS repo_id, commit_status.sha AS sha, max(commit_status.`index`) AS max_index " + - "FROM commit_status GROUP BY commit_status.repo_id, commit_status.sha) AS max_data"); err != nil { - return err - } - - return sess.Commit() + return nil } diff --git a/models/migrations/v1_16/v196.go b/models/migrations/v1_16/v196.go index ed7f4185a1bfc..9e390a971e471 100644 --- a/models/migrations/v1_16/v196.go +++ b/models/migrations/v1_16/v196.go @@ -9,13 +9,38 @@ import ( "xorm.io/xorm" ) -func AddColorColToProjectBoard(x *xorm.Engine) error { - type ProjectBoard struct { - Color string `xorm:"VARCHAR(7)"` +func AddTableCommitStatusIndex(x *xorm.Engine) error { + // CommitStatusIndex represents a table for commit status index + type CommitStatusIndex struct { + ID int64 + RepoID int64 `xorm:"unique(repo_sha)"` + SHA string `xorm:"unique(repo_sha)"` + MaxIndex int64 `xorm:"index"` } - if err := x.Sync2(new(ProjectBoard)); err != nil { + if err := x.Sync2(new(CommitStatusIndex)); err != nil { return fmt.Errorf("Sync2: %w", err) } - return nil + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + // Remove data we're goint to rebuild + if _, err := sess.Table("commit_status_index").Where("1=1").Delete(&CommitStatusIndex{}); err != nil { + return err + } + + // Create current data for all repositories with issues and PRs + if _, err := sess.Exec("INSERT INTO commit_status_index (repo_id, sha, max_index) " + + "SELECT max_data.repo_id, max_data.sha, max_data.max_index " + + "FROM ( SELECT commit_status.repo_id AS repo_id, commit_status.sha AS sha, max(commit_status.`index`) AS max_index " + + "FROM commit_status GROUP BY commit_status.repo_id, commit_status.sha) AS max_data"); err != nil { + return err + } + + return sess.Commit() } diff --git a/models/migrations/v1_16/v195_test.go b/models/migrations/v1_16/v196_test.go similarity index 100% rename from models/migrations/v1_16/v195_test.go rename to models/migrations/v1_16/v196_test.go diff --git a/models/migrations/v1_16/v197.go b/models/migrations/v1_16/v197.go index ef9d57a79e5bb..ed7f4185a1bfc 100644 --- a/models/migrations/v1_16/v197.go +++ b/models/migrations/v1_16/v197.go @@ -4,16 +4,18 @@ package v1_16 //nolint import ( + "fmt" + "xorm.io/xorm" ) -func AddRenamedBranchTable(x *xorm.Engine) error { - type RenamedBranch struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX NOT NULL"` - From string - To string - CreatedUnix int64 `xorm:"created"` +func AddColorColToProjectBoard(x *xorm.Engine) error { + type ProjectBoard struct { + Color string `xorm:"VARCHAR(7)"` + } + + if err := x.Sync2(new(ProjectBoard)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - return x.Sync2(new(RenamedBranch)) + return nil } diff --git a/models/migrations/v1_16/v198.go b/models/migrations/v1_16/v198.go index ed792de793c19..ef9d57a79e5bb 100644 --- a/models/migrations/v1_16/v198.go +++ b/models/migrations/v1_16/v198.go @@ -4,29 +4,16 @@ package v1_16 //nolint import ( - "fmt" - - 
"code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddTableIssueContentHistory(x *xorm.Engine) error { - type IssueContentHistory struct { - ID int64 `xorm:"pk autoincr"` - PosterID int64 - IssueID int64 `xorm:"INDEX"` - CommentID int64 `xorm:"INDEX"` - EditedUnix timeutil.TimeStamp `xorm:"INDEX"` - ContentText string `xorm:"LONGTEXT"` - IsFirstCreated bool - IsDeleted bool - } - - sess := x.NewSession() - defer sess.Close() - if err := sess.Sync2(new(IssueContentHistory)); err != nil { - return fmt.Errorf("Sync2: %w", err) +func AddRenamedBranchTable(x *xorm.Engine) error { + type RenamedBranch struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX NOT NULL"` + From string + To string + CreatedUnix int64 `xorm:"created"` } - return sess.Commit() + return x.Sync2(new(RenamedBranch)) } diff --git a/models/migrations/v1_16/v199.go b/models/migrations/v1_16/v199.go index 6adcf890afb67..ed792de793c19 100644 --- a/models/migrations/v1_16/v199.go +++ b/models/migrations/v1_16/v199.go @@ -3,4 +3,30 @@ package v1_16 //nolint -// We used to use a table `remote_version` to store information for updater, now we use `AppState`, so this migration task is a no-op now. +import ( + "fmt" + + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +func AddTableIssueContentHistory(x *xorm.Engine) error { + type IssueContentHistory struct { + ID int64 `xorm:"pk autoincr"` + PosterID int64 + IssueID int64 `xorm:"INDEX"` + CommentID int64 `xorm:"INDEX"` + EditedUnix timeutil.TimeStamp `xorm:"INDEX"` + ContentText string `xorm:"LONGTEXT"` + IsFirstCreated bool + IsDeleted bool + } + + sess := x.NewSession() + defer sess.Close() + if err := sess.Sync2(new(IssueContentHistory)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + return sess.Commit() +} diff --git a/models/migrations/v1_16/v200.go b/models/migrations/v1_16/v200.go index e39f4af9e86fb..6adcf890afb67 100644 --- a/models/migrations/v1_16/v200.go +++ b/models/migrations/v1_16/v200.go @@ -3,20 +3,4 @@ package v1_16 //nolint -import ( - "fmt" - - "xorm.io/xorm" -) - -func AddTableAppState(x *xorm.Engine) error { - type AppState struct { - ID string `xorm:"pk varchar(200)"` - Revision int64 - Content string `xorm:"LONGTEXT"` - } - if err := x.Sync2(new(AppState)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return nil -} +// We used to use a table `remote_version` to store information for updater, now we use `AppState`, so this migration task is a no-op now. 
diff --git a/models/migrations/v1_16/v201.go b/models/migrations/v1_16/v201.go index 35e0c9f2fbe34..e39f4af9e86fb 100644 --- a/models/migrations/v1_16/v201.go +++ b/models/migrations/v1_16/v201.go @@ -4,11 +4,19 @@ package v1_16 //nolint import ( + "fmt" + "xorm.io/xorm" ) -func DropTableRemoteVersion(x *xorm.Engine) error { - // drop the orphaned table introduced in `v199`, now the update checker also uses AppState, do not need this table - _ = x.DropTables("remote_version") +func AddTableAppState(x *xorm.Engine) error { + type AppState struct { + ID string `xorm:"pk varchar(200)"` + Revision int64 + Content string `xorm:"LONGTEXT"` + } + if err := x.Sync2(new(AppState)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } return nil } diff --git a/models/migrations/v1_16/v202.go b/models/migrations/v1_16/v202.go index 79676f3fab2d2..35e0c9f2fbe34 100644 --- a/models/migrations/v1_16/v202.go +++ b/models/migrations/v1_16/v202.go @@ -4,20 +4,11 @@ package v1_16 //nolint import ( - "fmt" - "xorm.io/xorm" ) -func CreateUserSettingsTable(x *xorm.Engine) error { - type UserSetting struct { - ID int64 `xorm:"pk autoincr"` - UserID int64 `xorm:"index unique(key_userid)"` // to load all of someone's settings - SettingKey string `xorm:"varchar(255) index unique(key_userid)"` // ensure key is always lowercase - SettingValue string `xorm:"text"` - } - if err := x.Sync2(new(UserSetting)); err != nil { - return fmt.Errorf("sync2: %w", err) - } +func DropTableRemoteVersion(x *xorm.Engine) error { + // drop the orphaned table introduced in `v199`, now the update checker also uses AppState, do not need this table + _ = x.DropTables("remote_version") return nil } diff --git a/models/migrations/v1_16/v203.go b/models/migrations/v1_16/v203.go index 26ec135a07cd2..79676f3fab2d2 100644 --- a/models/migrations/v1_16/v203.go +++ b/models/migrations/v1_16/v203.go @@ -4,14 +4,20 @@ package v1_16 //nolint import ( + "fmt" + "xorm.io/xorm" ) -func AddProjectIssueSorting(x *xorm.Engine) error { - // ProjectIssue saves relation from issue to a project - type ProjectIssue struct { - Sorting int64 `xorm:"NOT NULL DEFAULT 0"` +func CreateUserSettingsTable(x *xorm.Engine) error { + type UserSetting struct { + ID int64 `xorm:"pk autoincr"` + UserID int64 `xorm:"index unique(key_userid)"` // to load all of someone's settings + SettingKey string `xorm:"varchar(255) index unique(key_userid)"` // ensure key is always lowercase + SettingValue string `xorm:"text"` } - - return x.Sync2(new(ProjectIssue)) + if err := x.Sync2(new(UserSetting)); err != nil { + return fmt.Errorf("sync2: %w", err) + } + return nil } diff --git a/models/migrations/v1_16/v204.go b/models/migrations/v1_16/v204.go index e7577c8da49ca..26ec135a07cd2 100644 --- a/models/migrations/v1_16/v204.go +++ b/models/migrations/v1_16/v204.go @@ -3,12 +3,15 @@ package v1_16 //nolint -import "xorm.io/xorm" +import ( + "xorm.io/xorm" +) -func AddSSHKeyIsVerified(x *xorm.Engine) error { - type PublicKey struct { - Verified bool `xorm:"NOT NULL DEFAULT false"` +func AddProjectIssueSorting(x *xorm.Engine) error { + // ProjectIssue saves relation from issue to a project + type ProjectIssue struct { + Sorting int64 `xorm:"NOT NULL DEFAULT 0"` } - return x.Sync2(new(PublicKey)) + return x.Sync2(new(ProjectIssue)) } diff --git a/models/migrations/v1_16/v205.go b/models/migrations/v1_16/v205.go index d6c577083cdca..e7577c8da49ca 100644 --- a/models/migrations/v1_16/v205.go +++ b/models/migrations/v1_16/v205.go @@ -1,42 +1,14 @@ -// Copyright 2022 The Gitea Authors. 
All rights reserved. +// Copyright 2021 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_16 //nolint -import ( - "code.gitea.io/gitea/models/migrations/base" +import "xorm.io/xorm" - "xorm.io/xorm" - "xorm.io/xorm/schemas" -) - -func MigrateUserPasswordSalt(x *xorm.Engine) error { - dbType := x.Dialect().URI().DBType - // For SQLITE, the max length doesn't matter. - if dbType == schemas.SQLITE { - return nil - } - - if err := base.ModifyColumn(x, "user", &schemas.Column{ - Name: "rands", - SQLType: schemas.SQLType{ - Name: "VARCHAR", - }, - Length: 32, - // MySQL will like us again. - Nullable: true, - DefaultIsEmpty: true, - }); err != nil { - return err +func AddSSHKeyIsVerified(x *xorm.Engine) error { + type PublicKey struct { + Verified bool `xorm:"NOT NULL DEFAULT false"` } - return base.ModifyColumn(x, "user", &schemas.Column{ - Name: "salt", - SQLType: schemas.SQLType{ - Name: "VARCHAR", - }, - Length: 32, - Nullable: true, - DefaultIsEmpty: true, - }) + return x.Sync2(new(PublicKey)) } diff --git a/models/migrations/v1_16/v206.go b/models/migrations/v1_16/v206.go index 64c794a2d0f21..d6c577083cdca 100644 --- a/models/migrations/v1_16/v206.go +++ b/models/migrations/v1_16/v206.go @@ -4,25 +4,39 @@ package v1_16 //nolint import ( - "fmt" + "code.gitea.io/gitea/models/migrations/base" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func AddAuthorizeColForTeamUnit(x *xorm.Engine) error { - type TeamUnit struct { - ID int64 `xorm:"pk autoincr"` - OrgID int64 `xorm:"INDEX"` - TeamID int64 `xorm:"UNIQUE(s)"` - Type int `xorm:"UNIQUE(s)"` - AccessMode int +func MigrateUserPasswordSalt(x *xorm.Engine) error { + dbType := x.Dialect().URI().DBType + // For SQLITE, the max length doesn't matter. + if dbType == schemas.SQLITE { + return nil } - if err := x.Sync2(new(TeamUnit)); err != nil { - return fmt.Errorf("sync2: %w", err) + if err := base.ModifyColumn(x, "user", &schemas.Column{ + Name: "rands", + SQLType: schemas.SQLType{ + Name: "VARCHAR", + }, + Length: 32, + // MySQL will like us again. + Nullable: true, + DefaultIsEmpty: true, + }); err != nil { + return err } - // migrate old permission - _, err := x.Exec("UPDATE team_unit SET access_mode = (SELECT authorize FROM team WHERE team.id = team_unit.team_id)") - return err + return base.ModifyColumn(x, "user", &schemas.Column{ + Name: "salt", + SQLType: schemas.SQLType{ + Name: "VARCHAR", + }, + Length: 32, + Nullable: true, + DefaultIsEmpty: true, + }) } diff --git a/models/migrations/v1_16/v207.go b/models/migrations/v1_16/v207.go index 91208f066cabe..64c794a2d0f21 100644 --- a/models/migrations/v1_16/v207.go +++ b/models/migrations/v1_16/v207.go @@ -1,14 +1,28 @@ -// Copyright 2021 The Gitea Authors. All rights reserved. +// Copyright 2022 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_16 //nolint import ( + "fmt" + "xorm.io/xorm" ) -func AddWebAuthnCred(x *xorm.Engine) error { - // NO-OP Don't migrate here - let v210 do this. 
+func AddAuthorizeColForTeamUnit(x *xorm.Engine) error { + type TeamUnit struct { + ID int64 `xorm:"pk autoincr"` + OrgID int64 `xorm:"INDEX"` + TeamID int64 `xorm:"UNIQUE(s)"` + Type int `xorm:"UNIQUE(s)"` + AccessMode int + } + + if err := x.Sync2(new(TeamUnit)); err != nil { + return fmt.Errorf("sync2: %w", err) + } - return nil + // migrate old permission + _, err := x.Exec("UPDATE team_unit SET access_mode = (SELECT authorize FROM team WHERE team.id = team_unit.team_id)") + return err } diff --git a/models/migrations/v1_16/v208.go b/models/migrations/v1_16/v208.go index 1a11ef096ad9a..91208f066cabe 100644 --- a/models/migrations/v1_16/v208.go +++ b/models/migrations/v1_16/v208.go @@ -7,7 +7,8 @@ import ( "xorm.io/xorm" ) -func UseBase32HexForCredIDInWebAuthnCredential(x *xorm.Engine) error { - // noop +func AddWebAuthnCred(x *xorm.Engine) error { + // NO-OP Don't migrate here - let v210 do this. + return nil } diff --git a/models/migrations/v1_16/v209.go b/models/migrations/v1_16/v209.go index be3100e02a047..1a11ef096ad9a 100644 --- a/models/migrations/v1_16/v209.go +++ b/models/migrations/v1_16/v209.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Gitea Authors. All rights reserved. +// Copyright 2021 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_16 //nolint @@ -7,10 +7,7 @@ import ( "xorm.io/xorm" ) -func IncreaseCredentialIDTo410(x *xorm.Engine) error { - // no-op - // v208 was completely wrong - // So now we have to no-op again. - +func UseBase32HexForCredIDInWebAuthnCredential(x *xorm.Engine) error { + // noop return nil } diff --git a/models/migrations/v1_16/v210.go b/models/migrations/v1_16/v210.go index 974e77fb8421c..be3100e02a047 100644 --- a/models/migrations/v1_16/v210.go +++ b/models/migrations/v1_16/v210.go @@ -4,181 +4,13 @@ package v1_16 //nolint import ( - "crypto/elliptic" - "encoding/base32" - "fmt" - "strings" - - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/timeutil" - - "github.com/tstranex/u2f" "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -// v208 migration was completely broken -func RemigrateU2FCredentials(x *xorm.Engine) error { - // Create webauthnCredential table - type webauthnCredential struct { - ID int64 `xorm:"pk autoincr"` - Name string - LowerName string `xorm:"unique(s)"` - UserID int64 `xorm:"INDEX unique(s)"` - CredentialID string `xorm:"INDEX VARCHAR(410)"` // CredentalID in U2F is at most 255bytes / 5 * 8 = 408 - add a few extra characters for safety - PublicKey []byte - AttestationType string - AAGUID []byte - SignCount uint32 `xorm:"BIGINT"` - CloneWarning bool - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - if err := x.Sync2(&webauthnCredential{}); err != nil { - return err - } - - switch x.Dialect().URI().DBType { - case schemas.MYSQL: - _, err := x.Exec("ALTER TABLE webauthn_credential MODIFY COLUMN credential_id VARCHAR(410)") - if err != nil { - return err - } - case schemas.ORACLE: - _, err := x.Exec("ALTER TABLE webauthn_credential MODIFY credential_id VARCHAR(410)") - if err != nil { - return err - } - case schemas.MSSQL: - // This column has an index on it. I could write all of the code to attempt to change the index OR - // I could just use recreate table. 
- sess := x.NewSession() - if err := sess.Begin(); err != nil { - _ = sess.Close() - return err - } - - if err := base.RecreateTable(sess, new(webauthnCredential)); err != nil { - _ = sess.Close() - return err - } - if err := sess.Commit(); err != nil { - _ = sess.Close() - return err - } - if err := sess.Close(); err != nil { - return err - } - case schemas.POSTGRES: - _, err := x.Exec("ALTER TABLE webauthn_credential ALTER COLUMN credential_id TYPE VARCHAR(410)") - if err != nil { - return err - } - default: - // SQLite doesn't support ALTER COLUMN, and it already makes String _TEXT_ by default so no migration needed - // nor is there any need to re-migrate - } - - exist, err := x.IsTableExist("u2f_registration") - if err != nil { - return err - } - if !exist { - return nil - } - - // Now migrate the old u2f registrations to the new format - type u2fRegistration struct { - ID int64 `xorm:"pk autoincr"` - Name string - UserID int64 `xorm:"INDEX"` - Raw []byte - Counter uint32 `xorm:"BIGINT"` - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - - var start int - regs := make([]*u2fRegistration, 0, 50) - for { - err := x.OrderBy("id").Limit(50, start).Find(®s) - if err != nil { - return err - } - - err = func() error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return fmt.Errorf("unable to allow start session. Error: %w", err) - } - if x.Dialect().URI().DBType == schemas.MSSQL { - if _, err := sess.Exec("SET IDENTITY_INSERT `webauthn_credential` ON"); err != nil { - return fmt.Errorf("unable to allow identity insert on webauthn_credential. Error: %w", err) - } - } - for _, reg := range regs { - parsed := new(u2f.Registration) - err = parsed.UnmarshalBinary(reg.Raw) - if err != nil { - continue - } - remigrated := &webauthnCredential{ - ID: reg.ID, - Name: reg.Name, - LowerName: strings.ToLower(reg.Name), - UserID: reg.UserID, - CredentialID: base32.HexEncoding.EncodeToString(parsed.KeyHandle), - PublicKey: elliptic.Marshal(elliptic.P256(), parsed.PubKey.X, parsed.PubKey.Y), - AttestationType: "fido-u2f", - AAGUID: []byte{}, - SignCount: reg.Counter, - UpdatedUnix: reg.UpdatedUnix, - CreatedUnix: reg.CreatedUnix, - } - - has, err := sess.ID(reg.ID).Get(new(webauthnCredential)) - if err != nil { - return fmt.Errorf("unable to get webauthn_credential[%d]. Error: %w", reg.ID, err) - } - if !has { - has, err := sess.Where("`lower_name`=?", remigrated.LowerName).And("`user_id`=?", remigrated.UserID).Exist(new(webauthnCredential)) - if err != nil { - return fmt.Errorf("unable to check webauthn_credential[lower_name: %s, user_id: %d]. Error: %w", remigrated.LowerName, remigrated.UserID, err) - } - if !has { - _, err = sess.Insert(remigrated) - if err != nil { - return fmt.Errorf("unable to (re)insert webauthn_credential[%d]. Error: %w", reg.ID, err) - } - - continue - } - } - - _, err = sess.ID(remigrated.ID).AllCols().Update(remigrated) - if err != nil { - return fmt.Errorf("unable to update webauthn_credential[%d]. 
Error: %w", reg.ID, err) - } - } - return sess.Commit() - }() - if err != nil { - return err - } - - if len(regs) < 50 { - break - } - start += 50 - regs = regs[:0] - } - - if x.Dialect().URI().DBType == schemas.POSTGRES { - if _, err := x.Exec("SELECT setval('webauthn_credential_id_seq', COALESCE((SELECT MAX(id)+1 FROM `webauthn_credential`), 1), false)"); err != nil { - return err - } - } +func IncreaseCredentialIDTo410(x *xorm.Engine) error { + // no-op + // v208 was completely wrong + // So now we have to no-op again. return nil } diff --git a/models/migrations/v1_16/v211.go b/models/migrations/v1_16/v211.go new file mode 100644 index 0000000000000..974e77fb8421c --- /dev/null +++ b/models/migrations/v1_16/v211.go @@ -0,0 +1,184 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_16 //nolint + +import ( + "crypto/elliptic" + "encoding/base32" + "fmt" + "strings" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/timeutil" + + "github.com/tstranex/u2f" + "xorm.io/xorm" + "xorm.io/xorm/schemas" +) + +// v208 migration was completely broken +func RemigrateU2FCredentials(x *xorm.Engine) error { + // Create webauthnCredential table + type webauthnCredential struct { + ID int64 `xorm:"pk autoincr"` + Name string + LowerName string `xorm:"unique(s)"` + UserID int64 `xorm:"INDEX unique(s)"` + CredentialID string `xorm:"INDEX VARCHAR(410)"` // CredentalID in U2F is at most 255bytes / 5 * 8 = 408 - add a few extra characters for safety + PublicKey []byte + AttestationType string + AAGUID []byte + SignCount uint32 `xorm:"BIGINT"` + CloneWarning bool + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + if err := x.Sync2(&webauthnCredential{}); err != nil { + return err + } + + switch x.Dialect().URI().DBType { + case schemas.MYSQL: + _, err := x.Exec("ALTER TABLE webauthn_credential MODIFY COLUMN credential_id VARCHAR(410)") + if err != nil { + return err + } + case schemas.ORACLE: + _, err := x.Exec("ALTER TABLE webauthn_credential MODIFY credential_id VARCHAR(410)") + if err != nil { + return err + } + case schemas.MSSQL: + // This column has an index on it. I could write all of the code to attempt to change the index OR + // I could just use recreate table. 
+ sess := x.NewSession() + if err := sess.Begin(); err != nil { + _ = sess.Close() + return err + } + + if err := base.RecreateTable(sess, new(webauthnCredential)); err != nil { + _ = sess.Close() + return err + } + if err := sess.Commit(); err != nil { + _ = sess.Close() + return err + } + if err := sess.Close(); err != nil { + return err + } + case schemas.POSTGRES: + _, err := x.Exec("ALTER TABLE webauthn_credential ALTER COLUMN credential_id TYPE VARCHAR(410)") + if err != nil { + return err + } + default: + // SQLite doesn't support ALTER COLUMN, and it already makes String _TEXT_ by default so no migration needed + // nor is there any need to re-migrate + } + + exist, err := x.IsTableExist("u2f_registration") + if err != nil { + return err + } + if !exist { + return nil + } + + // Now migrate the old u2f registrations to the new format + type u2fRegistration struct { + ID int64 `xorm:"pk autoincr"` + Name string + UserID int64 `xorm:"INDEX"` + Raw []byte + Counter uint32 `xorm:"BIGINT"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + + var start int + regs := make([]*u2fRegistration, 0, 50) + for { + err := x.OrderBy("id").Limit(50, start).Find(®s) + if err != nil { + return err + } + + err = func() error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return fmt.Errorf("unable to allow start session. Error: %w", err) + } + if x.Dialect().URI().DBType == schemas.MSSQL { + if _, err := sess.Exec("SET IDENTITY_INSERT `webauthn_credential` ON"); err != nil { + return fmt.Errorf("unable to allow identity insert on webauthn_credential. Error: %w", err) + } + } + for _, reg := range regs { + parsed := new(u2f.Registration) + err = parsed.UnmarshalBinary(reg.Raw) + if err != nil { + continue + } + remigrated := &webauthnCredential{ + ID: reg.ID, + Name: reg.Name, + LowerName: strings.ToLower(reg.Name), + UserID: reg.UserID, + CredentialID: base32.HexEncoding.EncodeToString(parsed.KeyHandle), + PublicKey: elliptic.Marshal(elliptic.P256(), parsed.PubKey.X, parsed.PubKey.Y), + AttestationType: "fido-u2f", + AAGUID: []byte{}, + SignCount: reg.Counter, + UpdatedUnix: reg.UpdatedUnix, + CreatedUnix: reg.CreatedUnix, + } + + has, err := sess.ID(reg.ID).Get(new(webauthnCredential)) + if err != nil { + return fmt.Errorf("unable to get webauthn_credential[%d]. Error: %w", reg.ID, err) + } + if !has { + has, err := sess.Where("`lower_name`=?", remigrated.LowerName).And("`user_id`=?", remigrated.UserID).Exist(new(webauthnCredential)) + if err != nil { + return fmt.Errorf("unable to check webauthn_credential[lower_name: %s, user_id: %d]. Error: %w", remigrated.LowerName, remigrated.UserID, err) + } + if !has { + _, err = sess.Insert(remigrated) + if err != nil { + return fmt.Errorf("unable to (re)insert webauthn_credential[%d]. Error: %w", reg.ID, err) + } + + continue + } + } + + _, err = sess.ID(remigrated.ID).AllCols().Update(remigrated) + if err != nil { + return fmt.Errorf("unable to update webauthn_credential[%d]. 
Error: %w", reg.ID, err) + } + } + return sess.Commit() + }() + if err != nil { + return err + } + + if len(regs) < 50 { + break + } + start += 50 + regs = regs[:0] + } + + if x.Dialect().URI().DBType == schemas.POSTGRES { + if _, err := x.Exec("SELECT setval('webauthn_credential_id_seq', COALESCE((SELECT MAX(id)+1 FROM `webauthn_credential`), 1), false)"); err != nil { + return err + } + } + + return nil +} diff --git a/models/migrations/v1_16/v210_test.go b/models/migrations/v1_16/v211_test.go similarity index 100% rename from models/migrations/v1_16/v210_test.go rename to models/migrations/v1_16/v211_test.go diff --git a/models/migrations/v1_17/v211.go b/models/migrations/v1_17/v211.go deleted file mode 100644 index 9b72c8610b6ea..0000000000000 --- a/models/migrations/v1_17/v211.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_17 //nolint - -import ( - "xorm.io/xorm" -) - -func CreateForeignReferenceTable(_ *xorm.Engine) error { - return nil // This table was dropped in v1_19/v237.go -} diff --git a/models/migrations/v1_17/v212.go b/models/migrations/v1_17/v212.go index 536ba0a2c42d4..9b72c8610b6ea 100644 --- a/models/migrations/v1_17/v212.go +++ b/models/migrations/v1_17/v212.go @@ -4,90 +4,9 @@ package v1_17 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddPackageTables(x *xorm.Engine) error { - type Package struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - RepoID int64 `xorm:"INDEX"` - Type string `xorm:"UNIQUE(s) INDEX NOT NULL"` - Name string `xorm:"NOT NULL"` - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - SemverCompatible bool `xorm:"NOT NULL DEFAULT false"` - } - - if err := x.Sync2(new(Package)); err != nil { - return err - } - - type PackageVersion struct { - ID int64 `xorm:"pk autoincr"` - PackageID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - CreatorID int64 `xorm:"NOT NULL DEFAULT 0"` - Version string `xorm:"NOT NULL"` - LowerVersion string `xorm:"UNIQUE(s) INDEX NOT NULL"` - CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"` - IsInternal bool `xorm:"INDEX NOT NULL DEFAULT false"` - MetadataJSON string `xorm:"metadata_json TEXT"` - DownloadCount int64 `xorm:"NOT NULL DEFAULT 0"` - } - - if err := x.Sync2(new(PackageVersion)); err != nil { - return err - } - - type PackageProperty struct { - ID int64 `xorm:"pk autoincr"` - RefType int64 `xorm:"INDEX NOT NULL"` - RefID int64 `xorm:"INDEX NOT NULL"` - Name string `xorm:"INDEX NOT NULL"` - Value string `xorm:"TEXT NOT NULL"` - } - - if err := x.Sync2(new(PackageProperty)); err != nil { - return err - } - - type PackageFile struct { - ID int64 `xorm:"pk autoincr"` - VersionID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - BlobID int64 `xorm:"INDEX NOT NULL"` - Name string `xorm:"NOT NULL"` - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - CompositeKey string `xorm:"UNIQUE(s) INDEX"` - IsLead bool `xorm:"NOT NULL DEFAULT false"` - CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"` - } - - if err := x.Sync2(new(PackageFile)); err != nil { - return err - } - - type PackageBlob struct { - ID int64 `xorm:"pk autoincr"` - Size int64 `xorm:"NOT NULL DEFAULT 0"` - HashMD5 string `xorm:"hash_md5 char(32) UNIQUE(md5) INDEX NOT NULL"` - HashSHA1 string `xorm:"hash_sha1 char(40) UNIQUE(sha1) INDEX NOT NULL"` - HashSHA256 string `xorm:"hash_sha256 char(64) UNIQUE(sha256) INDEX NOT NULL"` - HashSHA512 string `xorm:"hash_sha512 char(128) 
UNIQUE(sha512) INDEX NOT NULL"` - CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"` - } - - if err := x.Sync2(new(PackageBlob)); err != nil { - return err - } - - type PackageBlobUpload struct { - ID string `xorm:"pk"` - BytesReceived int64 `xorm:"NOT NULL DEFAULT 0"` - HashStateBytes []byte `xorm:"BLOB"` - CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated INDEX NOT NULL"` - } - - return x.Sync2(new(PackageBlobUpload)) +func CreateForeignReferenceTable(_ *xorm.Engine) error { + return nil // This table was dropped in v1_19/v237.go } diff --git a/models/migrations/v1_17/v213.go b/models/migrations/v1_17/v213.go index 8607fdba47f24..536ba0a2c42d4 100644 --- a/models/migrations/v1_17/v213.go +++ b/models/migrations/v1_17/v213.go @@ -4,14 +4,90 @@ package v1_17 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddAllowMaintainerEdit(x *xorm.Engine) error { - // PullRequest represents relation between pull request and repositories. - type PullRequest struct { - AllowMaintainerEdit bool `xorm:"NOT NULL DEFAULT false"` +func AddPackageTables(x *xorm.Engine) error { + type Package struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + RepoID int64 `xorm:"INDEX"` + Type string `xorm:"UNIQUE(s) INDEX NOT NULL"` + Name string `xorm:"NOT NULL"` + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + SemverCompatible bool `xorm:"NOT NULL DEFAULT false"` + } + + if err := x.Sync2(new(Package)); err != nil { + return err + } + + type PackageVersion struct { + ID int64 `xorm:"pk autoincr"` + PackageID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + CreatorID int64 `xorm:"NOT NULL DEFAULT 0"` + Version string `xorm:"NOT NULL"` + LowerVersion string `xorm:"UNIQUE(s) INDEX NOT NULL"` + CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"` + IsInternal bool `xorm:"INDEX NOT NULL DEFAULT false"` + MetadataJSON string `xorm:"metadata_json TEXT"` + DownloadCount int64 `xorm:"NOT NULL DEFAULT 0"` + } + + if err := x.Sync2(new(PackageVersion)); err != nil { + return err + } + + type PackageProperty struct { + ID int64 `xorm:"pk autoincr"` + RefType int64 `xorm:"INDEX NOT NULL"` + RefID int64 `xorm:"INDEX NOT NULL"` + Name string `xorm:"INDEX NOT NULL"` + Value string `xorm:"TEXT NOT NULL"` + } + + if err := x.Sync2(new(PackageProperty)); err != nil { + return err + } + + type PackageFile struct { + ID int64 `xorm:"pk autoincr"` + VersionID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + BlobID int64 `xorm:"INDEX NOT NULL"` + Name string `xorm:"NOT NULL"` + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + CompositeKey string `xorm:"UNIQUE(s) INDEX"` + IsLead bool `xorm:"NOT NULL DEFAULT false"` + CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"` + } + + if err := x.Sync2(new(PackageFile)); err != nil { + return err + } + + type PackageBlob struct { + ID int64 `xorm:"pk autoincr"` + Size int64 `xorm:"NOT NULL DEFAULT 0"` + HashMD5 string `xorm:"hash_md5 char(32) UNIQUE(md5) INDEX NOT NULL"` + HashSHA1 string `xorm:"hash_sha1 char(40) UNIQUE(sha1) INDEX NOT NULL"` + HashSHA256 string `xorm:"hash_sha256 char(64) UNIQUE(sha256) INDEX NOT NULL"` + HashSHA512 string `xorm:"hash_sha512 char(128) UNIQUE(sha512) INDEX NOT NULL"` + CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"` + } + + if err := x.Sync2(new(PackageBlob)); err != nil { + return err + } + + type PackageBlobUpload struct { + ID string `xorm:"pk"` + BytesReceived int64 `xorm:"NOT NULL DEFAULT 0"` 
+ HashStateBytes []byte `xorm:"BLOB"` + CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated INDEX NOT NULL"` } - return x.Sync2(new(PullRequest)) + return x.Sync2(new(PackageBlobUpload)) } diff --git a/models/migrations/v1_17/v214.go b/models/migrations/v1_17/v214.go index 3b2351d160407..8607fdba47f24 100644 --- a/models/migrations/v1_17/v214.go +++ b/models/migrations/v1_17/v214.go @@ -7,16 +7,11 @@ import ( "xorm.io/xorm" ) -func AddAutoMergeTable(x *xorm.Engine) error { - type MergeStyle string - type PullAutoMerge struct { - ID int64 `xorm:"pk autoincr"` - PullID int64 `xorm:"UNIQUE"` - DoerID int64 `xorm:"NOT NULL"` - MergeStyle MergeStyle `xorm:"varchar(30)"` - Message string `xorm:"LONGTEXT"` - CreatedUnix int64 `xorm:"created"` +func AddAllowMaintainerEdit(x *xorm.Engine) error { + // PullRequest represents relation between pull request and repositories. + type PullRequest struct { + AllowMaintainerEdit bool `xorm:"NOT NULL DEFAULT false"` } - return x.Sync2(&PullAutoMerge{}) + return x.Sync2(new(PullRequest)) } diff --git a/models/migrations/v1_17/v215.go b/models/migrations/v1_17/v215.go index 0244be216c446..3b2351d160407 100644 --- a/models/migrations/v1_17/v215.go +++ b/models/migrations/v1_17/v215.go @@ -4,21 +4,19 @@ package v1_17 //nolint import ( - "code.gitea.io/gitea/models/pull" - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddReviewViewedFiles(x *xorm.Engine) error { - type ReviewState struct { - ID int64 `xorm:"pk autoincr"` - UserID int64 `xorm:"NOT NULL UNIQUE(pull_commit_user)"` - PullID int64 `xorm:"NOT NULL INDEX UNIQUE(pull_commit_user) DEFAULT 0"` - CommitSHA string `xorm:"NOT NULL VARCHAR(40) UNIQUE(pull_commit_user)"` - UpdatedFiles map[string]pull.ViewedState `xorm:"NOT NULL LONGTEXT JSON"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated"` +func AddAutoMergeTable(x *xorm.Engine) error { + type MergeStyle string + type PullAutoMerge struct { + ID int64 `xorm:"pk autoincr"` + PullID int64 `xorm:"UNIQUE"` + DoerID int64 `xorm:"NOT NULL"` + MergeStyle MergeStyle `xorm:"varchar(30)"` + Message string `xorm:"LONGTEXT"` + CreatedUnix int64 `xorm:"created"` } - return x.Sync2(new(ReviewState)) + return x.Sync2(&PullAutoMerge{}) } diff --git a/models/migrations/v1_17/v216.go b/models/migrations/v1_17/v216.go index 59b21d9b2c465..0244be216c446 100644 --- a/models/migrations/v1_17/v216.go +++ b/models/migrations/v1_17/v216.go @@ -3,5 +3,22 @@ package v1_17 //nolint -// This migration added non-ideal indices to the action table which on larger datasets slowed things down -// it has been superceded by v218.go +import ( + "code.gitea.io/gitea/models/pull" + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +func AddReviewViewedFiles(x *xorm.Engine) error { + type ReviewState struct { + ID int64 `xorm:"pk autoincr"` + UserID int64 `xorm:"NOT NULL UNIQUE(pull_commit_user)"` + PullID int64 `xorm:"NOT NULL INDEX UNIQUE(pull_commit_user) DEFAULT 0"` + CommitSHA string `xorm:"NOT NULL VARCHAR(40) UNIQUE(pull_commit_user)"` + UpdatedFiles map[string]pull.ViewedState `xorm:"NOT NULL LONGTEXT JSON"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated"` + } + + return x.Sync2(new(ReviewState)) +} diff --git a/models/migrations/v1_17/v217.go b/models/migrations/v1_17/v217.go index 3f970b68a540d..59b21d9b2c465 100644 --- a/models/migrations/v1_17/v217.go +++ b/models/migrations/v1_17/v217.go @@ -3,23 +3,5 @@ package v1_17 //nolint -import ( - "code.gitea.io/gitea/modules/setting" - - "xorm.io/xorm" -) - 
-func AlterHookTaskTextFieldsToLongText(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - if setting.Database.Type.IsMySQL() { - if _, err := sess.Exec("ALTER TABLE `hook_task` CHANGE `payload_content` `payload_content` LONGTEXT, CHANGE `request_content` `request_content` LONGTEXT, change `response_content` `response_content` LONGTEXT"); err != nil { - return err - } - } - return sess.Commit() -} +// This migration added non-ideal indices to the action table which on larger datasets slowed things down +// it has been superceded by v218.go diff --git a/models/migrations/v1_17/v218.go b/models/migrations/v1_17/v218.go index ae91ba0c494bc..3f970b68a540d 100644 --- a/models/migrations/v1_17/v218.go +++ b/models/migrations/v1_17/v218.go @@ -5,48 +5,21 @@ package v1_17 //nolint import ( "code.gitea.io/gitea/modules/setting" - "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -type improveActionTableIndicesAction struct { - ID int64 `xorm:"pk autoincr"` - UserID int64 // Receiver user id. - OpType int - ActUserID int64 // Action user id. - RepoID int64 - CommentID int64 `xorm:"INDEX"` - IsDeleted bool `xorm:"NOT NULL DEFAULT false"` - RefName string - IsPrivate bool `xorm:"NOT NULL DEFAULT false"` - Content string `xorm:"TEXT"` - CreatedUnix timeutil.TimeStamp `xorm:"created"` -} - -// TableName sets the name of this table -func (*improveActionTableIndicesAction) TableName() string { - return "action" -} - -// TableIndices implements xorm's TableIndices interface -func (*improveActionTableIndicesAction) TableIndices() []*schemas.Index { - repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType) - repoIndex.AddColumn("repo_id", "user_id", "is_deleted") - - actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType) - actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted") - indices := []*schemas.Index{actUserIndex, repoIndex} - if setting.Database.Type.IsPostgreSQL() { - cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType) - cudIndex.AddColumn("created_unix", "user_id", "is_deleted") - indices = append(indices, cudIndex) +func AlterHookTaskTextFieldsToLongText(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err } - return indices -} - -func ImproveActionTableIndices(x *xorm.Engine) error { - return x.Sync2(&improveActionTableIndicesAction{}) + if setting.Database.Type.IsMySQL() { + if _, err := sess.Exec("ALTER TABLE `hook_task` CHANGE `payload_content` `payload_content` LONGTEXT, CHANGE `request_content` `request_content` LONGTEXT, change `response_content` `response_content` LONGTEXT"); err != nil { + return err + } + } + return sess.Commit() } diff --git a/models/migrations/v1_17/v219.go b/models/migrations/v1_17/v219.go index a2165212cc9b2..ae91ba0c494bc 100644 --- a/models/migrations/v1_17/v219.go +++ b/models/migrations/v1_17/v219.go @@ -4,27 +4,49 @@ package v1_17 //nolint import ( - "time" - - "code.gitea.io/gitea/models/repo" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func AddSyncOnCommitColForPushMirror(x *xorm.Engine) error { - type PushMirror struct { - ID int64 `xorm:"pk autoincr"` - RepoID int64 `xorm:"INDEX"` - Repo *repo.Repository `xorm:"-"` - RemoteName string - - SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"` - Interval time.Duration - CreatedUnix timeutil.TimeStamp 
`xorm:"created"` - LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"` - LastError string `xorm:"text"` +type improveActionTableIndicesAction struct { + ID int64 `xorm:"pk autoincr"` + UserID int64 // Receiver user id. + OpType int + ActUserID int64 // Action user id. + RepoID int64 + CommentID int64 `xorm:"INDEX"` + IsDeleted bool `xorm:"NOT NULL DEFAULT false"` + RefName string + IsPrivate bool `xorm:"NOT NULL DEFAULT false"` + Content string `xorm:"TEXT"` + CreatedUnix timeutil.TimeStamp `xorm:"created"` +} + +// TableName sets the name of this table +func (*improveActionTableIndicesAction) TableName() string { + return "action" +} + +// TableIndices implements xorm's TableIndices interface +func (*improveActionTableIndicesAction) TableIndices() []*schemas.Index { + repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType) + repoIndex.AddColumn("repo_id", "user_id", "is_deleted") + + actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType) + actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted") + indices := []*schemas.Index{actUserIndex, repoIndex} + if setting.Database.Type.IsPostgreSQL() { + cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType) + cudIndex.AddColumn("created_unix", "user_id", "is_deleted") + indices = append(indices, cudIndex) } - return x.Sync2(new(PushMirror)) + return indices +} + +func ImproveActionTableIndices(x *xorm.Engine) error { + return x.Sync2(&improveActionTableIndicesAction{}) } diff --git a/models/migrations/v1_17/v220.go b/models/migrations/v1_17/v220.go index 904ddc5192935..a2165212cc9b2 100644 --- a/models/migrations/v1_17/v220.go +++ b/models/migrations/v1_17/v220.go @@ -4,24 +4,27 @@ package v1_17 //nolint import ( - packages_model "code.gitea.io/gitea/models/packages" - container_module "code.gitea.io/gitea/modules/packages/container" + "time" + + "code.gitea.io/gitea/models/repo" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" - "xorm.io/xorm/schemas" ) -func AddContainerRepositoryProperty(x *xorm.Engine) (err error) { - switch x.Dialect().URI().DBType { - case schemas.SQLITE: - _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, u.lower_name || '/' || p.lower_name FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?", - packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer) - case schemas.MSSQL: - _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, u.lower_name + '/' + p.lower_name FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?", - packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer) - default: - _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, CONCAT(u.lower_name, '/', p.lower_name) FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?", - packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer) +func AddSyncOnCommitColForPushMirror(x *xorm.Engine) error { + type PushMirror struct { + ID int64 `xorm:"pk autoincr"` + RepoID int64 `xorm:"INDEX"` + Repo *repo.Repository `xorm:"-"` + RemoteName string + + SyncOnCommit bool `xorm:"NOT NULL DEFAULT true"` + Interval time.Duration + CreatedUnix timeutil.TimeStamp `xorm:"created"` + LastUpdateUnix timeutil.TimeStamp `xorm:"INDEX last_update"` + LastError string `xorm:"text"` } - return err + + return 
x.Sync2(new(PushMirror)) } diff --git a/models/migrations/v1_17/v221.go b/models/migrations/v1_17/v221.go index 8a58b0f1056d3..904ddc5192935 100644 --- a/models/migrations/v1_17/v221.go +++ b/models/migrations/v1_17/v221.go @@ -4,71 +4,24 @@ package v1_17 //nolint import ( - "encoding/base32" - "fmt" - - "code.gitea.io/gitea/modules/timeutil" + packages_model "code.gitea.io/gitea/models/packages" + container_module "code.gitea.io/gitea/modules/packages/container" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func StoreWebauthnCredentialIDAsBytes(x *xorm.Engine) error { - // Create webauthnCredential table - type webauthnCredential struct { - ID int64 `xorm:"pk autoincr"` - Name string - LowerName string `xorm:"unique(s)"` - UserID int64 `xorm:"INDEX unique(s)"` - CredentialID string `xorm:"INDEX VARCHAR(410)"` - // Note the lack of INDEX here - these will be created once the column is renamed in v223.go - CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 - PublicKey []byte - AttestationType string - AAGUID []byte - SignCount uint32 `xorm:"BIGINT"` - CloneWarning bool - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - if err := x.Sync2(&webauthnCredential{}); err != nil { - return err - } - - var start int - creds := make([]*webauthnCredential, 0, 50) - for { - err := x.Select("id, credential_id").OrderBy("id").Limit(50, start).Find(&creds) - if err != nil { - return err - } - - err = func() error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return fmt.Errorf("unable to allow start session. Error: %w", err) - } - for _, cred := range creds { - cred.CredentialIDBytes, err = base32.HexEncoding.DecodeString(cred.CredentialID) - if err != nil { - return fmt.Errorf("unable to parse credential id %s for credential[%d]: %w", cred.CredentialID, cred.ID, err) - } - count, err := sess.ID(cred.ID).Cols("credential_id_bytes").Update(cred) - if count != 1 || err != nil { - return fmt.Errorf("unable to update credential id bytes for credential[%d]: %d,%w", cred.ID, count, err) - } - } - return sess.Commit() - }() - if err != nil { - return err - } - - if len(creds) < 50 { - break - } - start += 50 - creds = creds[:0] +func AddContainerRepositoryProperty(x *xorm.Engine) (err error) { + switch x.Dialect().URI().DBType { + case schemas.SQLITE: + _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, u.lower_name || '/' || p.lower_name FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?", + packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer) + case schemas.MSSQL: + _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, u.lower_name + '/' + p.lower_name FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?", + packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer) + default: + _, err = x.Exec("INSERT INTO package_property (ref_type, ref_id, name, value) SELECT ?, p.id, ?, CONCAT(u.lower_name, '/', p.lower_name) FROM package p JOIN `user` u ON p.owner_id = u.id WHERE p.type = ?", + packages_model.PropertyTypePackage, container_module.PropertyRepository, packages_model.TypeContainer) } - return nil + return err } diff --git a/models/migrations/v1_17/v222.go b/models/migrations/v1_17/v222.go index d1b77d845d164..8a58b0f1056d3 
100644 --- a/models/migrations/v1_17/v222.go +++ b/models/migrations/v1_17/v222.go @@ -4,34 +4,15 @@ package v1_17 //nolint import ( - "context" + "encoding/base32" "fmt" - "code.gitea.io/gitea/models/migrations/base" "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func DropOldCredentialIDColumn(x *xorm.Engine) error { - // This migration maybe rerun so that we should check if it has been run - credentialIDExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id") - if err != nil { - return err - } - if !credentialIDExist { - // Column is already non-extant - return nil - } - credentialIDBytesExists, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id_bytes") - if err != nil { - return err - } - if !credentialIDBytesExists { - // looks like 221 hasn't properly run - return fmt.Errorf("webauthn_credential does not have a credential_id_bytes column... it is not safe to run this migration") - } - +func StoreWebauthnCredentialIDAsBytes(x *xorm.Engine) error { // Create webauthnCredential table type webauthnCredential struct { ID int64 `xorm:"pk autoincr"` @@ -39,7 +20,7 @@ func DropOldCredentialIDColumn(x *xorm.Engine) error { LowerName string `xorm:"unique(s)"` UserID int64 `xorm:"INDEX unique(s)"` CredentialID string `xorm:"INDEX VARCHAR(410)"` - // Note the lack of the INDEX on CredentialIDBytes - we will add this in v223.go + // Note the lack of INDEX here - these will be created once the column is renamed in v223.go CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 PublicKey []byte AttestationType string @@ -53,12 +34,41 @@ func DropOldCredentialIDColumn(x *xorm.Engine) error { return err } - // Drop the old credential ID - sess := x.NewSession() - defer sess.Close() + var start int + creds := make([]*webauthnCredential, 0, 50) + for { + err := x.Select("id, credential_id").OrderBy("id").Limit(50, start).Find(&creds) + if err != nil { + return err + } + + err = func() error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return fmt.Errorf("unable to allow start session. 
Error: %w", err) + } + for _, cred := range creds { + cred.CredentialIDBytes, err = base32.HexEncoding.DecodeString(cred.CredentialID) + if err != nil { + return fmt.Errorf("unable to parse credential id %s for credential[%d]: %w", cred.CredentialID, cred.ID, err) + } + count, err := sess.ID(cred.ID).Cols("credential_id_bytes").Update(cred) + if count != 1 || err != nil { + return fmt.Errorf("unable to update credential id bytes for credential[%d]: %d,%w", cred.ID, count, err) + } + } + return sess.Commit() + }() + if err != nil { + return err + } - if err := base.DropTableColumns(sess, "webauthn_credential", "credential_id"); err != nil { - return fmt.Errorf("unable to drop old credentialID column: %w", err) + if len(creds) < 50 { + break + } + start += 50 + creds = creds[:0] } - return sess.Commit() + return nil } diff --git a/models/migrations/v1_17/v221_test.go b/models/migrations/v1_17/v222_test.go similarity index 100% rename from models/migrations/v1_17/v221_test.go rename to models/migrations/v1_17/v222_test.go diff --git a/models/migrations/v1_17/v223.go b/models/migrations/v1_17/v223.go index 6c61dbc53ae48..d1b77d845d164 100644 --- a/models/migrations/v1_17/v223.go +++ b/models/migrations/v1_17/v223.go @@ -8,96 +8,57 @@ import ( "fmt" "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func RenameCredentialIDBytes(x *xorm.Engine) error { +func DropOldCredentialIDColumn(x *xorm.Engine) error { // This migration maybe rerun so that we should check if it has been run credentialIDExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id") if err != nil { return err } - if credentialIDExist { - credentialIDBytesExists, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id_bytes") - if err != nil { - return err - } - if !credentialIDBytesExists { - return nil - } + if !credentialIDExist { + // Column is already non-extant + return nil } - - err = func() error { - // webauthnCredential table - type webauthnCredential struct { - ID int64 `xorm:"pk autoincr"` - Name string - LowerName string `xorm:"unique(s)"` - UserID int64 `xorm:"INDEX unique(s)"` - // Note the lack of INDEX here - CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 - PublicKey []byte - AttestationType string - AAGUID []byte - SignCount uint32 `xorm:"BIGINT"` - CloneWarning bool - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Sync2(new(webauthnCredential)); err != nil { - return fmt.Errorf("error on Sync2: %w", err) - } - - if credentialIDExist { - // if both errors and message exist, drop message at first - if err := base.DropTableColumns(sess, "webauthn_credential", "credential_id"); err != nil { - return err - } - } - - switch { - case setting.Database.Type.IsMySQL(): - if _, err := sess.Exec("ALTER TABLE `webauthn_credential` CHANGE credential_id_bytes credential_id VARBINARY(1024)"); err != nil { - return err - } - case setting.Database.Type.IsMSSQL(): - if _, err := sess.Exec("sp_rename 'webauthn_credential.credential_id_bytes', 'credential_id', 'COLUMN'"); err != nil { - return err - } - default: - if _, err := sess.Exec("ALTER TABLE `webauthn_credential` RENAME COLUMN 
credential_id_bytes TO credential_id"); err != nil { - return err - } - } - return sess.Commit() - }() + credentialIDBytesExists, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id_bytes") if err != nil { return err } + if !credentialIDBytesExists { + // looks like 221 hasn't properly run + return fmt.Errorf("webauthn_credential does not have a credential_id_bytes column... it is not safe to run this migration") + } // Create webauthnCredential table type webauthnCredential struct { - ID int64 `xorm:"pk autoincr"` - Name string - LowerName string `xorm:"unique(s)"` - UserID int64 `xorm:"INDEX unique(s)"` - CredentialID []byte `xorm:"INDEX VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 - PublicKey []byte - AttestationType string - AAGUID []byte - SignCount uint32 `xorm:"BIGINT"` - CloneWarning bool - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + ID int64 `xorm:"pk autoincr"` + Name string + LowerName string `xorm:"unique(s)"` + UserID int64 `xorm:"INDEX unique(s)"` + CredentialID string `xorm:"INDEX VARCHAR(410)"` + // Note the lack of the INDEX on CredentialIDBytes - we will add this in v223.go + CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 + PublicKey []byte + AttestationType string + AAGUID []byte + SignCount uint32 `xorm:"BIGINT"` + CloneWarning bool + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + if err := x.Sync2(&webauthnCredential{}); err != nil { + return err + } + + // Drop the old credential ID + sess := x.NewSession() + defer sess.Close() + + if err := base.DropTableColumns(sess, "webauthn_credential", "credential_id"); err != nil { + return fmt.Errorf("unable to drop old credentialID column: %w", err) } - return x.Sync2(&webauthnCredential{}) + return sess.Commit() } diff --git a/models/migrations/v1_17/v224.go b/models/migrations/v1_17/v224.go new file mode 100644 index 0000000000000..6c61dbc53ae48 --- /dev/null +++ b/models/migrations/v1_17/v224.go @@ -0,0 +1,103 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_17 //nolint + +import ( + "context" + "fmt" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +func RenameCredentialIDBytes(x *xorm.Engine) error { + // This migration maybe rerun so that we should check if it has been run + credentialIDExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id") + if err != nil { + return err + } + if credentialIDExist { + credentialIDBytesExists, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webauthn_credential", "credential_id_bytes") + if err != nil { + return err + } + if !credentialIDBytesExists { + return nil + } + } + + err = func() error { + // webauthnCredential table + type webauthnCredential struct { + ID int64 `xorm:"pk autoincr"` + Name string + LowerName string `xorm:"unique(s)"` + UserID int64 `xorm:"INDEX unique(s)"` + // Note the lack of INDEX here + CredentialIDBytes []byte `xorm:"VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 + PublicKey []byte + AttestationType string + AAGUID []byte + SignCount uint32 `xorm:"BIGINT"` + CloneWarning bool + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + + if err := sess.Sync2(new(webauthnCredential)); err != nil { + return fmt.Errorf("error on Sync2: %w", err) + } + + if credentialIDExist { + // if both errors and message exist, drop message at first + if err := base.DropTableColumns(sess, "webauthn_credential", "credential_id"); err != nil { + return err + } + } + + switch { + case setting.Database.Type.IsMySQL(): + if _, err := sess.Exec("ALTER TABLE `webauthn_credential` CHANGE credential_id_bytes credential_id VARBINARY(1024)"); err != nil { + return err + } + case setting.Database.Type.IsMSSQL(): + if _, err := sess.Exec("sp_rename 'webauthn_credential.credential_id_bytes', 'credential_id', 'COLUMN'"); err != nil { + return err + } + default: + if _, err := sess.Exec("ALTER TABLE `webauthn_credential` RENAME COLUMN credential_id_bytes TO credential_id"); err != nil { + return err + } + } + return sess.Commit() + }() + if err != nil { + return err + } + + // Create webauthnCredential table + type webauthnCredential struct { + ID int64 `xorm:"pk autoincr"` + Name string + LowerName string `xorm:"unique(s)"` + UserID int64 `xorm:"INDEX unique(s)"` + CredentialID []byte `xorm:"INDEX VARBINARY(1024)"` // CredentialID is at most 1023 bytes as per spec released 20 July 2022 + PublicKey []byte + AttestationType string + AAGUID []byte + SignCount uint32 `xorm:"BIGINT"` + CloneWarning bool + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + return x.Sync2(&webauthnCredential{}) +} diff --git a/models/migrations/v1_18/v224.go b/models/migrations/v1_18/v224.go deleted file mode 100644 index afd34a5db09cc..0000000000000 --- a/models/migrations/v1_18/v224.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 The Gitea Authors. All rights reserved. 
-// SPDX-License-Identifier: MIT - -package v1_18 //nolint - -import ( - "xorm.io/xorm" -) - -func CreateUserBadgesTable(x *xorm.Engine) error { - type Badge struct { - ID int64 `xorm:"pk autoincr"` - Description string - ImageURL string - } - - type userBadge struct { - ID int64 `xorm:"pk autoincr"` - BadgeID int64 - UserID int64 `xorm:"INDEX"` - } - - if err := x.Sync2(new(Badge)); err != nil { - return err - } - return x.Sync2(new(userBadge)) -} diff --git a/models/migrations/v1_18/v225.go b/models/migrations/v1_18/v225.go index b0ac3777fc248..afd34a5db09cc 100644 --- a/models/migrations/v1_18/v225.go +++ b/models/migrations/v1_18/v225.go @@ -4,25 +4,24 @@ package v1_18 //nolint import ( - "code.gitea.io/gitea/modules/setting" - "xorm.io/xorm" ) -func AlterPublicGPGKeyContentFieldsToMediumText(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err +func CreateUserBadgesTable(x *xorm.Engine) error { + type Badge struct { + ID int64 `xorm:"pk autoincr"` + Description string + ImageURL string } - if setting.Database.Type.IsMySQL() { - if _, err := sess.Exec("ALTER TABLE `gpg_key` CHANGE `content` `content` MEDIUMTEXT"); err != nil { - return err - } - if _, err := sess.Exec("ALTER TABLE `public_key` CHANGE `content` `content` MEDIUMTEXT"); err != nil { - return err - } + type userBadge struct { + ID int64 `xorm:"pk autoincr"` + BadgeID int64 + UserID int64 `xorm:"INDEX"` + } + + if err := x.Sync2(new(Badge)); err != nil { + return err } - return sess.Commit() + return x.Sync2(new(userBadge)) } diff --git a/models/migrations/v1_18/v226.go b/models/migrations/v1_18/v226.go index f87e24b11de9f..b0ac3777fc248 100644 --- a/models/migrations/v1_18/v226.go +++ b/models/migrations/v1_18/v226.go @@ -4,11 +4,25 @@ package v1_18 //nolint import ( - "xorm.io/builder" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func FixPackageSemverField(x *xorm.Engine) error { - _, err := x.Exec(builder.Update(builder.Eq{"semver_compatible": false}).From("`package`").Where(builder.In("`type`", "conan", "generic"))) - return err +func AlterPublicGPGKeyContentFieldsToMediumText(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + + if setting.Database.Type.IsMySQL() { + if _, err := sess.Exec("ALTER TABLE `gpg_key` CHANGE `content` `content` MEDIUMTEXT"); err != nil { + return err + } + if _, err := sess.Exec("ALTER TABLE `public_key` CHANGE `content` `content` MEDIUMTEXT"); err != nil { + return err + } + } + return sess.Commit() } diff --git a/models/migrations/v1_18/v227.go b/models/migrations/v1_18/v227.go index c938028323fd7..f87e24b11de9f 100644 --- a/models/migrations/v1_18/v227.go +++ b/models/migrations/v1_18/v227.go @@ -4,60 +4,11 @@ package v1_18 //nolint import ( - "fmt" - "strconv" - - "code.gitea.io/gitea/modules/setting" - "code.gitea.io/gitea/modules/timeutil" - + "xorm.io/builder" "xorm.io/xorm" ) -type SystemSetting struct { - ID int64 `xorm:"pk autoincr"` - SettingKey string `xorm:"varchar(255) unique"` // ensure key is always lowercase - SettingValue string `xorm:"text"` - Version int `xorm:"version"` // prevent to override - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated"` -} - -func insertSettingsIfNotExist(x *xorm.Engine, sysSettings []*SystemSetting) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - for _, setting := range sysSettings { - 
exist, err := sess.Table("system_setting").Where("setting_key=?", setting.SettingKey).Exist() - if err != nil { - return err - } - if !exist { - if _, err := sess.Insert(setting); err != nil { - return err - } - } - } - return sess.Commit() -} - -func CreateSystemSettingsTable(x *xorm.Engine) error { - if err := x.Sync2(new(SystemSetting)); err != nil { - return fmt.Errorf("sync2: %w", err) - } - - // migrate xx to database - sysSettings := []*SystemSetting{ - { - SettingKey: "picture.disable_gravatar", - SettingValue: strconv.FormatBool(setting.DisableGravatar), - }, - { - SettingKey: "picture.enable_federated_avatar", - SettingValue: strconv.FormatBool(setting.EnableFederatedAvatar), - }, - } - - return insertSettingsIfNotExist(x, sysSettings) +func FixPackageSemverField(x *xorm.Engine) error { + _, err := x.Exec(builder.Update(builder.Eq{"semver_compatible": false}).From("`package`").Where(builder.In("`type`", "conan", "generic"))) + return err } diff --git a/models/migrations/v1_18/v228.go b/models/migrations/v1_18/v228.go index 58d3257528ae9..c938028323fd7 100644 --- a/models/migrations/v1_18/v228.go +++ b/models/migrations/v1_18/v228.go @@ -4,22 +4,60 @@ package v1_18 //nolint import ( + "fmt" + "strconv" + + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" ) -func AddTeamInviteTable(x *xorm.Engine) error { - type TeamInvite struct { - ID int64 `xorm:"pk autoincr"` - Token string `xorm:"UNIQUE(token) INDEX NOT NULL DEFAULT ''"` - InviterID int64 `xorm:"NOT NULL DEFAULT 0"` - OrgID int64 `xorm:"INDEX NOT NULL DEFAULT 0"` - TeamID int64 `xorm:"UNIQUE(team_mail) INDEX NOT NULL DEFAULT 0"` - Email string `xorm:"UNIQUE(team_mail) NOT NULL DEFAULT ''"` - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +type SystemSetting struct { + ID int64 `xorm:"pk autoincr"` + SettingKey string `xorm:"varchar(255) unique"` // ensure key is always lowercase + SettingValue string `xorm:"text"` + Version int `xorm:"version"` // prevent to override + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` +} + +func insertSettingsIfNotExist(x *xorm.Engine, sysSettings []*SystemSetting) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + for _, setting := range sysSettings { + exist, err := sess.Table("system_setting").Where("setting_key=?", setting.SettingKey).Exist() + if err != nil { + return err + } + if !exist { + if _, err := sess.Insert(setting); err != nil { + return err + } + } + } + return sess.Commit() +} + +func CreateSystemSettingsTable(x *xorm.Engine) error { + if err := x.Sync2(new(SystemSetting)); err != nil { + return fmt.Errorf("sync2: %w", err) + } + + // migrate xx to database + sysSettings := []*SystemSetting{ + { + SettingKey: "picture.disable_gravatar", + SettingValue: strconv.FormatBool(setting.DisableGravatar), + }, + { + SettingKey: "picture.enable_federated_avatar", + SettingValue: strconv.FormatBool(setting.EnableFederatedAvatar), + }, } - return x.Sync2(new(TeamInvite)) + return insertSettingsIfNotExist(x, sysSettings) } diff --git a/models/migrations/v1_18/v229.go b/models/migrations/v1_18/v229.go index 10d9f350979f6..58d3257528ae9 100644 --- a/models/migrations/v1_18/v229.go +++ b/models/migrations/v1_18/v229.go @@ -4,43 +4,22 @@ package v1_18 //nolint import ( - "fmt" + "code.gitea.io/gitea/modules/timeutil" - "code.gitea.io/gitea/models/issues" - - "xorm.io/builder" "xorm.io/xorm" 
) -func UpdateOpenMilestoneCounts(x *xorm.Engine) error { - var openMilestoneIDs []int64 - err := x.Table("milestone").Select("id").Where(builder.Neq{"is_closed": 1}).Find(&openMilestoneIDs) - if err != nil { - return fmt.Errorf("error selecting open milestone IDs: %w", err) - } - - for _, id := range openMilestoneIDs { - _, err := x.ID(id). - SetExpr("num_issues", builder.Select("count(*)").From("issue").Where( - builder.Eq{"milestone_id": id}, - )). - SetExpr("num_closed_issues", builder.Select("count(*)").From("issue").Where( - builder.Eq{ - "milestone_id": id, - "is_closed": true, - }, - )). - Update(&issues.Milestone{}) - if err != nil { - return fmt.Errorf("error updating issue counts in milestone %d: %w", id, err) - } - _, err = x.Exec("UPDATE `milestone` SET completeness=100*num_closed_issues/(CASE WHEN num_issues > 0 THEN num_issues ELSE 1 END) WHERE id=?", - id, - ) - if err != nil { - return fmt.Errorf("error setting completeness on milestone %d: %w", id, err) - } +func AddTeamInviteTable(x *xorm.Engine) error { + type TeamInvite struct { + ID int64 `xorm:"pk autoincr"` + Token string `xorm:"UNIQUE(token) INDEX NOT NULL DEFAULT ''"` + InviterID int64 `xorm:"NOT NULL DEFAULT 0"` + OrgID int64 `xorm:"INDEX NOT NULL DEFAULT 0"` + TeamID int64 `xorm:"UNIQUE(team_mail) INDEX NOT NULL DEFAULT 0"` + Email string `xorm:"UNIQUE(team_mail) NOT NULL DEFAULT ''"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - return nil + return x.Sync2(new(TeamInvite)) } diff --git a/models/migrations/v1_18/v229_test.go b/models/migrations/v1_18/v229_test.go deleted file mode 100644 index d489328c00056..0000000000000 --- a/models/migrations/v1_18/v229_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The Gitea Authors. All rights reserved. 
-// SPDX-License-Identifier: MIT - -package v1_18 //nolint - -import ( - "testing" - - "code.gitea.io/gitea/models/issues" - "code.gitea.io/gitea/models/migrations/base" - - "github.com/stretchr/testify/assert" -) - -func Test_UpdateOpenMilestoneCounts(t *testing.T) { - type ExpectedMilestone issues.Milestone - - // Prepare and load the testing database - x, deferable := base.PrepareTestEnv(t, 0, new(issues.Milestone), new(ExpectedMilestone), new(issues.Issue)) - defer deferable() - if x == nil || t.Failed() { - return - } - - if err := UpdateOpenMilestoneCounts(x); err != nil { - assert.NoError(t, err) - return - } - - expected := []ExpectedMilestone{} - if err := x.Table("expected_milestone").Asc("id").Find(&expected); !assert.NoError(t, err) { - return - } - - got := []issues.Milestone{} - if err := x.Table("milestone").Asc("id").Find(&got); !assert.NoError(t, err) { - return - } - - for i, e := range expected { - got := got[i] - assert.Equal(t, e.ID, got.ID) - assert.Equal(t, e.NumIssues, got.NumIssues) - assert.Equal(t, e.NumClosedIssues, got.NumClosedIssues) - } -} diff --git a/models/migrations/v1_18/v230.go b/models/migrations/v1_18/v230.go index cf94926be17ca..10d9f350979f6 100644 --- a/models/migrations/v1_18/v230.go +++ b/models/migrations/v1_18/v230.go @@ -4,14 +4,43 @@ package v1_18 //nolint import ( + "fmt" + + "code.gitea.io/gitea/models/issues" + + "xorm.io/builder" "xorm.io/xorm" ) -// AddConfidentialColumnToOAuth2ApplicationTable: add ConfidentialClient column, setting existing rows to true -func AddConfidentialClientColumnToOAuth2ApplicationTable(x *xorm.Engine) error { - type OAuth2Application struct { - ConfidentialClient bool `xorm:"NOT NULL DEFAULT TRUE"` +func UpdateOpenMilestoneCounts(x *xorm.Engine) error { + var openMilestoneIDs []int64 + err := x.Table("milestone").Select("id").Where(builder.Neq{"is_closed": 1}).Find(&openMilestoneIDs) + if err != nil { + return fmt.Errorf("error selecting open milestone IDs: %w", err) + } + + for _, id := range openMilestoneIDs { + _, err := x.ID(id). + SetExpr("num_issues", builder.Select("count(*)").From("issue").Where( + builder.Eq{"milestone_id": id}, + )). + SetExpr("num_closed_issues", builder.Select("count(*)").From("issue").Where( + builder.Eq{ + "milestone_id": id, + "is_closed": true, + }, + )). 
+ Update(&issues.Milestone{}) + if err != nil { + return fmt.Errorf("error updating issue counts in milestone %d: %w", id, err) + } + _, err = x.Exec("UPDATE `milestone` SET completeness=100*num_closed_issues/(CASE WHEN num_issues > 0 THEN num_issues ELSE 1 END) WHERE id=?", + id, + ) + if err != nil { + return fmt.Errorf("error setting completeness on milestone %d: %w", id, err) + } } - return x.Sync(new(OAuth2Application)) + return nil } diff --git a/models/migrations/v1_18/v230_test.go b/models/migrations/v1_18/v230_test.go index 308f3a50231f6..d489328c00056 100644 --- a/models/migrations/v1_18/v230_test.go +++ b/models/migrations/v1_18/v230_test.go @@ -6,42 +6,41 @@ package v1_18 //nolint import ( "testing" + "code.gitea.io/gitea/models/issues" "code.gitea.io/gitea/models/migrations/base" "github.com/stretchr/testify/assert" ) -func Test_AddConfidentialClientColumnToOAuth2ApplicationTable(t *testing.T) { - // premigration - type OAuth2Application struct { - ID int64 - } +func Test_UpdateOpenMilestoneCounts(t *testing.T) { + type ExpectedMilestone issues.Milestone // Prepare and load the testing database - x, deferable := base.PrepareTestEnv(t, 0, new(OAuth2Application)) + x, deferable := base.PrepareTestEnv(t, 0, new(issues.Milestone), new(ExpectedMilestone), new(issues.Issue)) defer deferable() if x == nil || t.Failed() { return } - if err := AddConfidentialClientColumnToOAuth2ApplicationTable(x); err != nil { + if err := UpdateOpenMilestoneCounts(x); err != nil { assert.NoError(t, err) return } - // postmigration - type ExpectedOAuth2Application struct { - ID int64 - ConfidentialClient bool + expected := []ExpectedMilestone{} + if err := x.Table("expected_milestone").Asc("id").Find(&expected); !assert.NoError(t, err) { + return } - got := []ExpectedOAuth2Application{} - if err := x.Table("o_auth2_application").Select("id, confidential_client").Find(&got); !assert.NoError(t, err) { + got := []issues.Milestone{} + if err := x.Table("milestone").Asc("id").Find(&got); !assert.NoError(t, err) { return } - assert.NotEmpty(t, got) - for _, e := range got { - assert.True(t, e.ConfidentialClient) + for i, e := range expected { + got := got[i] + assert.Equal(t, e.ID, got.ID) + assert.Equal(t, e.NumIssues, got.NumIssues) + assert.Equal(t, e.NumClosedIssues, got.NumClosedIssues) } } diff --git a/models/migrations/v1_18/v231.go b/models/migrations/v1_18/v231.go new file mode 100644 index 0000000000000..cf94926be17ca --- /dev/null +++ b/models/migrations/v1_18/v231.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_18 //nolint + +import ( + "xorm.io/xorm" +) + +// AddConfidentialColumnToOAuth2ApplicationTable: add ConfidentialClient column, setting existing rows to true +func AddConfidentialClientColumnToOAuth2ApplicationTable(x *xorm.Engine) error { + type OAuth2Application struct { + ConfidentialClient bool `xorm:"NOT NULL DEFAULT TRUE"` + } + + return x.Sync(new(OAuth2Application)) +} diff --git a/models/migrations/v1_18/v231_test.go b/models/migrations/v1_18/v231_test.go new file mode 100644 index 0000000000000..308f3a50231f6 --- /dev/null +++ b/models/migrations/v1_18/v231_test.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_18 //nolint + +import ( + "testing" + + "code.gitea.io/gitea/models/migrations/base" + + "github.com/stretchr/testify/assert" +) + +func Test_AddConfidentialClientColumnToOAuth2ApplicationTable(t *testing.T) { + // premigration + type OAuth2Application struct { + ID int64 + } + + // Prepare and load the testing database + x, deferable := base.PrepareTestEnv(t, 0, new(OAuth2Application)) + defer deferable() + if x == nil || t.Failed() { + return + } + + if err := AddConfidentialClientColumnToOAuth2ApplicationTable(x); err != nil { + assert.NoError(t, err) + return + } + + // postmigration + type ExpectedOAuth2Application struct { + ID int64 + ConfidentialClient bool + } + + got := []ExpectedOAuth2Application{} + if err := x.Table("o_auth2_application").Select("id, confidential_client").Find(&got); !assert.NoError(t, err) { + return + } + + assert.NotEmpty(t, got) + for _, e := range got { + assert.True(t, e.ConfidentialClient) + } +} diff --git a/models/migrations/v1_19/v231.go b/models/migrations/v1_19/v231.go deleted file mode 100644 index 79e46132f0a3c..0000000000000 --- a/models/migrations/v1_19/v231.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_19 //nolint - -import ( - "xorm.io/xorm" -) - -func AddIndexForHookTask(x *xorm.Engine) error { - type HookTask struct { - ID int64 `xorm:"pk autoincr"` - HookID int64 `xorm:"index"` - UUID string `xorm:"unique"` - } - - return x.Sync(new(HookTask)) -} diff --git a/models/migrations/v1_19/v232.go b/models/migrations/v1_19/v232.go index 9caf587c1e9ca..79e46132f0a3c 100644 --- a/models/migrations/v1_19/v232.go +++ b/models/migrations/v1_19/v232.go @@ -4,22 +4,15 @@ package v1_19 //nolint import ( - "code.gitea.io/gitea/modules/setting" - "xorm.io/xorm" ) -func AlterPackageVersionMetadataToLongText(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err +func AddIndexForHookTask(x *xorm.Engine) error { + type HookTask struct { + ID int64 `xorm:"pk autoincr"` + HookID int64 `xorm:"index"` + UUID string `xorm:"unique"` } - if setting.Database.Type.IsMySQL() { - if _, err := sess.Exec("ALTER TABLE `package_version` MODIFY COLUMN `metadata_json` LONGTEXT"); err != nil { - return err - } - } - return sess.Commit() + return x.Sync(new(HookTask)) } diff --git a/models/migrations/v1_19/v233.go b/models/migrations/v1_19/v233.go index ba4cd8e20b995..9caf587c1e9ca 100644 --- a/models/migrations/v1_19/v233.go +++ b/models/migrations/v1_19/v233.go @@ -4,178 +4,22 @@ package v1_19 //nolint import ( - "fmt" - - "code.gitea.io/gitea/modules/json" - "code.gitea.io/gitea/modules/secret" "code.gitea.io/gitea/modules/setting" - api "code.gitea.io/gitea/modules/structs" - "xorm.io/builder" "xorm.io/xorm" ) -func batchProcess[T any](x *xorm.Engine, buf []T, query func(limit, start int) *xorm.Session, process func(*xorm.Session, T) error) error { - size := cap(buf) - start := 0 - for { - err := query(size, start).Find(&buf) - if err != nil { - return err - } - if len(buf) == 0 { - return nil - } - - err = func() error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return fmt.Errorf("unable to allow start session. 
Error: %w", err) - } - for _, record := range buf { - if err := process(sess, record); err != nil { - return err - } - } - return sess.Commit() - }() - if err != nil { - return err - } - - if len(buf) < size { - return nil - } - start += size - buf = buf[:0] - } -} - -func AddHeaderAuthorizationEncryptedColWebhook(x *xorm.Engine) error { - // Add the column to the table - type Webhook struct { - ID int64 `xorm:"pk autoincr"` - Type string `xorm:"VARCHAR(16) 'type'"` - Meta string `xorm:"TEXT"` // store hook-specific attributes - - // HeaderAuthorizationEncrypted should be accessed using HeaderAuthorization() and SetHeaderAuthorization() - HeaderAuthorizationEncrypted string `xorm:"TEXT"` - } - err := x.Sync(new(Webhook)) - if err != nil { - return err - } - - // Migrate the matrix webhooks - - type MatrixMeta struct { - HomeserverURL string `json:"homeserver_url"` - Room string `json:"room_id"` - MessageType int `json:"message_type"` - } - type MatrixMetaWithAccessToken struct { - MatrixMeta - AccessToken string `json:"access_token"` - } - - err = batchProcess(x, - make([]*Webhook, 0, 50), - func(limit, start int) *xorm.Session { - return x.Where("type=?", "matrix").OrderBy("id").Limit(limit, start) - }, - func(sess *xorm.Session, hook *Webhook) error { - // retrieve token from meta - var withToken MatrixMetaWithAccessToken - err := json.Unmarshal([]byte(hook.Meta), &withToken) - if err != nil { - return fmt.Errorf("unable to unmarshal matrix meta for webhook[id=%d]: %w", hook.ID, err) - } - if withToken.AccessToken == "" { - return nil - } - - // encrypt token - authorization := "Bearer " + withToken.AccessToken - hook.HeaderAuthorizationEncrypted, err = secret.EncryptSecret(setting.SecretKey, authorization) - if err != nil { - return fmt.Errorf("unable to encrypt access token for webhook[id=%d]: %w", hook.ID, err) - } - - // remove token from meta - withoutToken, err := json.Marshal(withToken.MatrixMeta) - if err != nil { - return fmt.Errorf("unable to marshal matrix meta for webhook[id=%d]: %w", hook.ID, err) - } - hook.Meta = string(withoutToken) - - // save in database - count, err := sess.ID(hook.ID).Cols("meta", "header_authorization_encrypted").Update(hook) - if count != 1 || err != nil { - return fmt.Errorf("unable to update header_authorization_encrypted for webhook[id=%d]: %d,%w", hook.ID, count, err) - } - return nil - }) - if err != nil { +func AlterPackageVersionMetadataToLongText(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { return err } - // Remove access_token from HookTask - - type HookTask struct { - ID int64 `xorm:"pk autoincr"` - HookID int64 - PayloadContent string `xorm:"LONGTEXT"` - } - - type MatrixPayloadSafe struct { - Body string `json:"body"` - MsgType string `json:"msgtype"` - Format string `json:"format"` - FormattedBody string `json:"formatted_body"` - Commits []*api.PayloadCommit `json:"io.gitea.commits,omitempty"` - } - type MatrixPayloadUnsafe struct { - MatrixPayloadSafe - AccessToken string `json:"access_token"` - } - - err = batchProcess(x, - make([]*HookTask, 0, 50), - func(limit, start int) *xorm.Session { - return x.Where(builder.And( - builder.In("hook_id", builder.Select("id").From("webhook").Where(builder.Eq{"type": "matrix"})), - builder.Like{"payload_content", "access_token"}, - )).OrderBy("id").Limit(limit, 0) // ignore the provided "start", since other payload were already converted and don't contain 'payload_content' anymore - }, - func(sess *xorm.Session, hookTask *HookTask) 
error { - // retrieve token from payload_content - var withToken MatrixPayloadUnsafe - err := json.Unmarshal([]byte(hookTask.PayloadContent), &withToken) - if err != nil { - return fmt.Errorf("unable to unmarshal payload_content for hook_task[id=%d]: %w", hookTask.ID, err) - } - if withToken.AccessToken == "" { - return nil - } - - // remove token from payload_content - withoutToken, err := json.Marshal(withToken.MatrixPayloadSafe) - if err != nil { - return fmt.Errorf("unable to marshal payload_content for hook_task[id=%d]: %w", hookTask.ID, err) - } - hookTask.PayloadContent = string(withoutToken) - - // save in database - count, err := sess.ID(hookTask.ID).Cols("payload_content").Update(hookTask) - if count != 1 || err != nil { - return fmt.Errorf("unable to update payload_content for hook_task[id=%d]: %d,%w", hookTask.ID, count, err) - } - return nil - }) - if err != nil { - return err + if setting.Database.Type.IsMySQL() { + if _, err := sess.Exec("ALTER TABLE `package_version` MODIFY COLUMN `metadata_json` LONGTEXT"); err != nil { + return err + } } - - return nil + return sess.Commit() } diff --git a/models/migrations/v1_19/v234.go b/models/migrations/v1_19/v234.go index 4e98a2b7a9fdc..ba4cd8e20b995 100644 --- a/models/migrations/v1_19/v234.go +++ b/models/migrations/v1_19/v234.go @@ -4,25 +4,178 @@ package v1_19 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" + "fmt" + "code.gitea.io/gitea/modules/json" + "code.gitea.io/gitea/modules/secret" + "code.gitea.io/gitea/modules/setting" + api "code.gitea.io/gitea/modules/structs" + + "xorm.io/builder" "xorm.io/xorm" ) -func CreatePackageCleanupRuleTable(x *xorm.Engine) error { - type PackageCleanupRule struct { - ID int64 `xorm:"pk autoincr"` - Enabled bool `xorm:"INDEX NOT NULL DEFAULT false"` - OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL DEFAULT 0"` - Type string `xorm:"UNIQUE(s) INDEX NOT NULL"` - KeepCount int `xorm:"NOT NULL DEFAULT 0"` - KeepPattern string `xorm:"NOT NULL DEFAULT ''"` - RemoveDays int `xorm:"NOT NULL DEFAULT 0"` - RemovePattern string `xorm:"NOT NULL DEFAULT ''"` - MatchFullName bool `xorm:"NOT NULL DEFAULT false"` - CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL DEFAULT 0"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated NOT NULL DEFAULT 0"` - } - - return x.Sync2(new(PackageCleanupRule)) +func batchProcess[T any](x *xorm.Engine, buf []T, query func(limit, start int) *xorm.Session, process func(*xorm.Session, T) error) error { + size := cap(buf) + start := 0 + for { + err := query(size, start).Find(&buf) + if err != nil { + return err + } + if len(buf) == 0 { + return nil + } + + err = func() error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return fmt.Errorf("unable to allow start session. 
Error: %w", err) + } + for _, record := range buf { + if err := process(sess, record); err != nil { + return err + } + } + return sess.Commit() + }() + if err != nil { + return err + } + + if len(buf) < size { + return nil + } + start += size + buf = buf[:0] + } +} + +func AddHeaderAuthorizationEncryptedColWebhook(x *xorm.Engine) error { + // Add the column to the table + type Webhook struct { + ID int64 `xorm:"pk autoincr"` + Type string `xorm:"VARCHAR(16) 'type'"` + Meta string `xorm:"TEXT"` // store hook-specific attributes + + // HeaderAuthorizationEncrypted should be accessed using HeaderAuthorization() and SetHeaderAuthorization() + HeaderAuthorizationEncrypted string `xorm:"TEXT"` + } + err := x.Sync(new(Webhook)) + if err != nil { + return err + } + + // Migrate the matrix webhooks + + type MatrixMeta struct { + HomeserverURL string `json:"homeserver_url"` + Room string `json:"room_id"` + MessageType int `json:"message_type"` + } + type MatrixMetaWithAccessToken struct { + MatrixMeta + AccessToken string `json:"access_token"` + } + + err = batchProcess(x, + make([]*Webhook, 0, 50), + func(limit, start int) *xorm.Session { + return x.Where("type=?", "matrix").OrderBy("id").Limit(limit, start) + }, + func(sess *xorm.Session, hook *Webhook) error { + // retrieve token from meta + var withToken MatrixMetaWithAccessToken + err := json.Unmarshal([]byte(hook.Meta), &withToken) + if err != nil { + return fmt.Errorf("unable to unmarshal matrix meta for webhook[id=%d]: %w", hook.ID, err) + } + if withToken.AccessToken == "" { + return nil + } + + // encrypt token + authorization := "Bearer " + withToken.AccessToken + hook.HeaderAuthorizationEncrypted, err = secret.EncryptSecret(setting.SecretKey, authorization) + if err != nil { + return fmt.Errorf("unable to encrypt access token for webhook[id=%d]: %w", hook.ID, err) + } + + // remove token from meta + withoutToken, err := json.Marshal(withToken.MatrixMeta) + if err != nil { + return fmt.Errorf("unable to marshal matrix meta for webhook[id=%d]: %w", hook.ID, err) + } + hook.Meta = string(withoutToken) + + // save in database + count, err := sess.ID(hook.ID).Cols("meta", "header_authorization_encrypted").Update(hook) + if count != 1 || err != nil { + return fmt.Errorf("unable to update header_authorization_encrypted for webhook[id=%d]: %d,%w", hook.ID, count, err) + } + return nil + }) + if err != nil { + return err + } + + // Remove access_token from HookTask + + type HookTask struct { + ID int64 `xorm:"pk autoincr"` + HookID int64 + PayloadContent string `xorm:"LONGTEXT"` + } + + type MatrixPayloadSafe struct { + Body string `json:"body"` + MsgType string `json:"msgtype"` + Format string `json:"format"` + FormattedBody string `json:"formatted_body"` + Commits []*api.PayloadCommit `json:"io.gitea.commits,omitempty"` + } + type MatrixPayloadUnsafe struct { + MatrixPayloadSafe + AccessToken string `json:"access_token"` + } + + err = batchProcess(x, + make([]*HookTask, 0, 50), + func(limit, start int) *xorm.Session { + return x.Where(builder.And( + builder.In("hook_id", builder.Select("id").From("webhook").Where(builder.Eq{"type": "matrix"})), + builder.Like{"payload_content", "access_token"}, + )).OrderBy("id").Limit(limit, 0) // ignore the provided "start", since other payload were already converted and don't contain 'payload_content' anymore + }, + func(sess *xorm.Session, hookTask *HookTask) error { + // retrieve token from payload_content + var withToken MatrixPayloadUnsafe + err := json.Unmarshal([]byte(hookTask.PayloadContent), 
&withToken) + if err != nil { + return fmt.Errorf("unable to unmarshal payload_content for hook_task[id=%d]: %w", hookTask.ID, err) + } + if withToken.AccessToken == "" { + return nil + } + + // remove token from payload_content + withoutToken, err := json.Marshal(withToken.MatrixPayloadSafe) + if err != nil { + return fmt.Errorf("unable to marshal payload_content for hook_task[id=%d]: %w", hookTask.ID, err) + } + hookTask.PayloadContent = string(withoutToken) + + // save in database + count, err := sess.ID(hookTask.ID).Cols("payload_content").Update(hookTask) + if count != 1 || err != nil { + return fmt.Errorf("unable to update payload_content for hook_task[id=%d]: %d,%w", hookTask.ID, count, err) + } + return nil + }) + if err != nil { + return err + } + + return nil } diff --git a/models/migrations/v1_19/v233_test.go b/models/migrations/v1_19/v234_test.go similarity index 100% rename from models/migrations/v1_19/v233_test.go rename to models/migrations/v1_19/v234_test.go diff --git a/models/migrations/v1_19/v235.go b/models/migrations/v1_19/v235.go index 3715de3920c89..4e98a2b7a9fdc 100644 --- a/models/migrations/v1_19/v235.go +++ b/models/migrations/v1_19/v235.go @@ -4,13 +4,25 @@ package v1_19 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddIndexForAccessToken(x *xorm.Engine) error { - type AccessToken struct { - TokenLastEight string `xorm:"INDEX token_last_eight"` +func CreatePackageCleanupRuleTable(x *xorm.Engine) error { + type PackageCleanupRule struct { + ID int64 `xorm:"pk autoincr"` + Enabled bool `xorm:"INDEX NOT NULL DEFAULT false"` + OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL DEFAULT 0"` + Type string `xorm:"UNIQUE(s) INDEX NOT NULL"` + KeepCount int `xorm:"NOT NULL DEFAULT 0"` + KeepPattern string `xorm:"NOT NULL DEFAULT ''"` + RemoveDays int `xorm:"NOT NULL DEFAULT 0"` + RemovePattern string `xorm:"NOT NULL DEFAULT ''"` + MatchFullName bool `xorm:"NOT NULL DEFAULT false"` + CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL DEFAULT 0"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated NOT NULL DEFAULT 0"` } - return x.Sync(new(AccessToken)) + return x.Sync2(new(PackageCleanupRule)) } diff --git a/models/migrations/v1_19/v236.go b/models/migrations/v1_19/v236.go index f172a85b1fc93..3715de3920c89 100644 --- a/models/migrations/v1_19/v236.go +++ b/models/migrations/v1_19/v236.go @@ -4,20 +4,13 @@ package v1_19 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func CreateSecretsTable(x *xorm.Engine) error { - type Secret struct { - ID int64 - OwnerID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL"` - RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL DEFAULT 0"` - Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"` - Data string `xorm:"LONGTEXT"` - CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"` +func AddIndexForAccessToken(x *xorm.Engine) error { + type AccessToken struct { + TokenLastEight string `xorm:"INDEX token_last_eight"` } - return x.Sync(new(Secret)) + return x.Sync(new(AccessToken)) } diff --git a/models/migrations/v1_19/v237.go b/models/migrations/v1_19/v237.go index b23c765aa5aac..f172a85b1fc93 100644 --- a/models/migrations/v1_19/v237.go +++ b/models/migrations/v1_19/v237.go @@ -4,12 +4,20 @@ package v1_19 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func DropForeignReferenceTable(x *xorm.Engine) error { - // Drop the table introduced in `v211`, it's considered badly designed and doesn't look like to be used. 
- // See: https://github.com/go-gitea/gitea/issues/21086#issuecomment-1318217453 - type ForeignReference struct{} - return x.DropTables(new(ForeignReference)) +func CreateSecretsTable(x *xorm.Engine) error { + type Secret struct { + ID int64 + OwnerID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL"` + RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name) NOT NULL DEFAULT 0"` + Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"` + Data string `xorm:"LONGTEXT"` + CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"` + } + + return x.Sync(new(Secret)) } diff --git a/models/migrations/v1_19/v238.go b/models/migrations/v1_19/v238.go index 266e6cea58a8a..b23c765aa5aac 100644 --- a/models/migrations/v1_19/v238.go +++ b/models/migrations/v1_19/v238.go @@ -4,24 +4,12 @@ package v1_19 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -// AddUpdatedUnixToLFSMetaObject adds an updated column to the LFSMetaObject to allow for garbage collection -func AddUpdatedUnixToLFSMetaObject(x *xorm.Engine) error { +func DropForeignReferenceTable(x *xorm.Engine) error { // Drop the table introduced in `v211`, it's considered badly designed and doesn't look like to be used. // See: https://github.com/go-gitea/gitea/issues/21086#issuecomment-1318217453 - // LFSMetaObject stores metadata for LFS tracked files. - type LFSMetaObject struct { - ID int64 `xorm:"pk autoincr"` - Oid string `json:"oid" xorm:"UNIQUE(s) INDEX NOT NULL"` - Size int64 `json:"size" xorm:"NOT NULL"` - RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - CreatedUnix timeutil.TimeStamp `xorm:"created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - } - - return x.Sync(new(LFSMetaObject)) + type ForeignReference struct{} + return x.DropTables(new(ForeignReference)) } diff --git a/models/migrations/v1_19/v239.go b/models/migrations/v1_19/v239.go index 10076f2401696..266e6cea58a8a 100644 --- a/models/migrations/v1_19/v239.go +++ b/models/migrations/v1_19/v239.go @@ -4,19 +4,24 @@ package v1_19 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddScopeForAccessTokens(x *xorm.Engine) error { - type AccessToken struct { - Scope string - } - - if err := x.Sync(new(AccessToken)); err != nil { - return err +// AddUpdatedUnixToLFSMetaObject adds an updated column to the LFSMetaObject to allow for garbage collection +func AddUpdatedUnixToLFSMetaObject(x *xorm.Engine) error { + // Drop the table introduced in `v211`, it's considered badly designed and doesn't look like to be used. + // See: https://github.com/go-gitea/gitea/issues/21086#issuecomment-1318217453 + // LFSMetaObject stores metadata for LFS tracked files. + type LFSMetaObject struct { + ID int64 `xorm:"pk autoincr"` + Oid string `json:"oid" xorm:"UNIQUE(s) INDEX NOT NULL"` + Size int64 `json:"size" xorm:"NOT NULL"` + RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + CreatedUnix timeutil.TimeStamp `xorm:"created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - // all previous tokens have `all` and `sudo` scopes - _, err := x.Exec("UPDATE access_token SET scope = ? 
WHERE scope IS NULL OR scope = ''", "all,sudo") - return err + return x.Sync(new(LFSMetaObject)) } diff --git a/models/migrations/v1_19/v240.go b/models/migrations/v1_19/v240.go index 4505f86299556..10076f2401696 100644 --- a/models/migrations/v1_19/v240.go +++ b/models/migrations/v1_19/v240.go @@ -4,173 +4,19 @@ package v1_19 //nolint import ( - "code.gitea.io/gitea/models/db" - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddActionsTables(x *xorm.Engine) error { - type ActionRunner struct { - ID int64 - UUID string `xorm:"CHAR(36) UNIQUE"` - Name string `xorm:"VARCHAR(255)"` - OwnerID int64 `xorm:"index"` // org level runner, 0 means system - RepoID int64 `xorm:"index"` // repo level runner, if orgid also is zero, then it's a global - Description string `xorm:"TEXT"` - Base int // 0 native 1 docker 2 virtual machine - RepoRange string // glob match which repositories could use this runner - - Token string `xorm:"-"` - TokenHash string `xorm:"UNIQUE"` // sha256 of token - TokenSalt string - // TokenLastEight string `xorm:"token_last_eight"` // it's unnecessary because we don't find runners by token - - LastOnline timeutil.TimeStamp `xorm:"index"` - LastActive timeutil.TimeStamp `xorm:"index"` - - // Store OS and Artch. - AgentLabels []string - // Store custom labes use defined. - CustomLabels []string - - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated"` - Deleted timeutil.TimeStamp `xorm:"deleted"` - } - - type ActionRunnerToken struct { - ID int64 - Token string `xorm:"UNIQUE"` - OwnerID int64 `xorm:"index"` // org level runner, 0 means system - RepoID int64 `xorm:"index"` // repo level runner, if orgid also is zero, then it's a global - IsActive bool - - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated"` - Deleted timeutil.TimeStamp `xorm:"deleted"` - } - - type ActionRun struct { - ID int64 - Title string - RepoID int64 `xorm:"index unique(repo_index)"` - OwnerID int64 `xorm:"index"` - WorkflowID string `xorm:"index"` // the name of workflow file - Index int64 `xorm:"index unique(repo_index)"` // a unique number for each run of a repository - TriggerUserID int64 - Ref string - CommitSHA string - Event string - IsForkPullRequest bool - EventPayload string `xorm:"LONGTEXT"` - Status int `xorm:"index"` - Started timeutil.TimeStamp - Stopped timeutil.TimeStamp - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated"` - } - - type ActionRunJob struct { - ID int64 - RunID int64 `xorm:"index"` - RepoID int64 `xorm:"index"` - OwnerID int64 `xorm:"index"` - CommitSHA string `xorm:"index"` - IsForkPullRequest bool - Name string `xorm:"VARCHAR(255)"` - Attempt int64 - WorkflowPayload []byte - JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id - Needs []string `xorm:"JSON TEXT"` - RunsOn []string `xorm:"JSON TEXT"` - TaskID int64 // the latest task of the job - Status int `xorm:"index"` - Started timeutil.TimeStamp - Stopped timeutil.TimeStamp - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated index"` - } - - type Repository struct { - NumActionRuns int `xorm:"NOT NULL DEFAULT 0"` - NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"` - } - - type ActionRunIndex db.ResourceIndex - - type ActionTask struct { - ID int64 - JobID int64 - Attempt int64 - RunnerID int64 `xorm:"index"` - Status int `xorm:"index"` - Started timeutil.TimeStamp `xorm:"index"` - Stopped timeutil.TimeStamp - - RepoID int64 `xorm:"index"` 
- OwnerID int64 `xorm:"index"` - CommitSHA string `xorm:"index"` - IsForkPullRequest bool - - TokenHash string `xorm:"UNIQUE"` // sha256 of token - TokenSalt string - TokenLastEight string `xorm:"index token_last_eight"` - - LogFilename string // file name of log - LogInStorage bool // read log from database or from storage - LogLength int64 // lines count - LogSize int64 // blob size - LogIndexes []int64 `xorm:"LONGBLOB"` // line number to offset - LogExpired bool // files that are too old will be deleted - - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated index"` - } - - type ActionTaskStep struct { - ID int64 - Name string `xorm:"VARCHAR(255)"` - TaskID int64 `xorm:"index unique(task_index)"` - Index int64 `xorm:"index unique(task_index)"` - RepoID int64 `xorm:"index"` - Status int `xorm:"index"` - LogIndex int64 - LogLength int64 - Started timeutil.TimeStamp - Stopped timeutil.TimeStamp - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated"` - } - - type dbfsMeta struct { - ID int64 `xorm:"pk autoincr"` - FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"` - BlockSize int64 `xorm:"BIGINT NOT NULL"` - FileSize int64 `xorm:"BIGINT NOT NULL"` - CreateTimestamp int64 `xorm:"BIGINT NOT NULL"` - ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"` +func AddScopeForAccessTokens(x *xorm.Engine) error { + type AccessToken struct { + Scope string } - type dbfsData struct { - ID int64 `xorm:"pk autoincr"` - Revision int64 `xorm:"BIGINT NOT NULL"` - MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"` - BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"` - BlobSize int64 `xorm:"BIGINT NOT NULL"` - BlobData []byte `xorm:"BLOB NOT NULL"` + if err := x.Sync(new(AccessToken)); err != nil { + return err } - return x.Sync( - new(ActionRunner), - new(ActionRunnerToken), - new(ActionRun), - new(ActionRunJob), - new(Repository), - new(ActionRunIndex), - new(ActionTask), - new(ActionTaskStep), - new(dbfsMeta), - new(dbfsData), - ) + // all previous tokens have `all` and `sudo` scopes + _, err := x.Exec("UPDATE access_token SET scope = ? 
WHERE scope IS NULL OR scope = ''", "all,sudo") + return err } diff --git a/models/migrations/v1_19/v241.go b/models/migrations/v1_19/v241.go index a617d6fd2f6f2..4505f86299556 100644 --- a/models/migrations/v1_19/v241.go +++ b/models/migrations/v1_19/v241.go @@ -4,14 +4,173 @@ package v1_19 //nolint import ( + "code.gitea.io/gitea/models/db" + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -// AddCardTypeToProjectTable: add CardType column, setting existing rows to CardTypeTextOnly -func AddCardTypeToProjectTable(x *xorm.Engine) error { - type Project struct { - CardType int `xorm:"NOT NULL DEFAULT 0"` +func AddActionsTables(x *xorm.Engine) error { + type ActionRunner struct { + ID int64 + UUID string `xorm:"CHAR(36) UNIQUE"` + Name string `xorm:"VARCHAR(255)"` + OwnerID int64 `xorm:"index"` // org level runner, 0 means system + RepoID int64 `xorm:"index"` // repo level runner, if orgid also is zero, then it's a global + Description string `xorm:"TEXT"` + Base int // 0 native 1 docker 2 virtual machine + RepoRange string // glob match which repositories could use this runner + + Token string `xorm:"-"` + TokenHash string `xorm:"UNIQUE"` // sha256 of token + TokenSalt string + // TokenLastEight string `xorm:"token_last_eight"` // it's unnecessary because we don't find runners by token + + LastOnline timeutil.TimeStamp `xorm:"index"` + LastActive timeutil.TimeStamp `xorm:"index"` + + // Store OS and Artch. + AgentLabels []string + // Store custom labes use defined. + CustomLabels []string + + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` + Deleted timeutil.TimeStamp `xorm:"deleted"` + } + + type ActionRunnerToken struct { + ID int64 + Token string `xorm:"UNIQUE"` + OwnerID int64 `xorm:"index"` // org level runner, 0 means system + RepoID int64 `xorm:"index"` // repo level runner, if orgid also is zero, then it's a global + IsActive bool + + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` + Deleted timeutil.TimeStamp `xorm:"deleted"` + } + + type ActionRun struct { + ID int64 + Title string + RepoID int64 `xorm:"index unique(repo_index)"` + OwnerID int64 `xorm:"index"` + WorkflowID string `xorm:"index"` // the name of workflow file + Index int64 `xorm:"index unique(repo_index)"` // a unique number for each run of a repository + TriggerUserID int64 + Ref string + CommitSHA string + Event string + IsForkPullRequest bool + EventPayload string `xorm:"LONGTEXT"` + Status int `xorm:"index"` + Started timeutil.TimeStamp + Stopped timeutil.TimeStamp + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` + } + + type ActionRunJob struct { + ID int64 + RunID int64 `xorm:"index"` + RepoID int64 `xorm:"index"` + OwnerID int64 `xorm:"index"` + CommitSHA string `xorm:"index"` + IsForkPullRequest bool + Name string `xorm:"VARCHAR(255)"` + Attempt int64 + WorkflowPayload []byte + JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id + Needs []string `xorm:"JSON TEXT"` + RunsOn []string `xorm:"JSON TEXT"` + TaskID int64 // the latest task of the job + Status int `xorm:"index"` + Started timeutil.TimeStamp + Stopped timeutil.TimeStamp + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated index"` + } + + type Repository struct { + NumActionRuns int `xorm:"NOT NULL DEFAULT 0"` + NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"` + } + + type ActionRunIndex db.ResourceIndex + + type ActionTask struct { + ID int64 + JobID int64 
+ Attempt int64 + RunnerID int64 `xorm:"index"` + Status int `xorm:"index"` + Started timeutil.TimeStamp `xorm:"index"` + Stopped timeutil.TimeStamp + + RepoID int64 `xorm:"index"` + OwnerID int64 `xorm:"index"` + CommitSHA string `xorm:"index"` + IsForkPullRequest bool + + TokenHash string `xorm:"UNIQUE"` // sha256 of token + TokenSalt string + TokenLastEight string `xorm:"index token_last_eight"` + + LogFilename string // file name of log + LogInStorage bool // read log from database or from storage + LogLength int64 // lines count + LogSize int64 // blob size + LogIndexes []int64 `xorm:"LONGBLOB"` // line number to offset + LogExpired bool // files that are too old will be deleted + + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated index"` + } + + type ActionTaskStep struct { + ID int64 + Name string `xorm:"VARCHAR(255)"` + TaskID int64 `xorm:"index unique(task_index)"` + Index int64 `xorm:"index unique(task_index)"` + RepoID int64 `xorm:"index"` + Status int `xorm:"index"` + LogIndex int64 + LogLength int64 + Started timeutil.TimeStamp + Stopped timeutil.TimeStamp + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` + } + + type dbfsMeta struct { + ID int64 `xorm:"pk autoincr"` + FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"` + BlockSize int64 `xorm:"BIGINT NOT NULL"` + FileSize int64 `xorm:"BIGINT NOT NULL"` + CreateTimestamp int64 `xorm:"BIGINT NOT NULL"` + ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"` + } + + type dbfsData struct { + ID int64 `xorm:"pk autoincr"` + Revision int64 `xorm:"BIGINT NOT NULL"` + MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"` + BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"` + BlobSize int64 `xorm:"BIGINT NOT NULL"` + BlobData []byte `xorm:"BLOB NOT NULL"` } - return x.Sync(new(Project)) + return x.Sync( + new(ActionRunner), + new(ActionRunnerToken), + new(ActionRun), + new(ActionRunJob), + new(Repository), + new(ActionRunIndex), + new(ActionTask), + new(ActionTaskStep), + new(dbfsMeta), + new(dbfsData), + ) } diff --git a/models/migrations/v1_19/v242.go b/models/migrations/v1_19/v242.go index 4470835214f34..a617d6fd2f6f2 100644 --- a/models/migrations/v1_19/v242.go +++ b/models/migrations/v1_19/v242.go @@ -1,26 +1,17 @@ -// Copyright 2023 The Gitea Authors. All rights reserved. +// Copyright 2022 The Gitea Authors. All rights reserved. 
// SPDX-License-Identifier: MIT package v1_19 //nolint import ( - "code.gitea.io/gitea/modules/setting" - "xorm.io/xorm" ) -// AlterPublicGPGKeyImportContentFieldToMediumText: set GPGKeyImport Content field to MEDIUMTEXT -func AlterPublicGPGKeyImportContentFieldToMediumText(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err +// AddCardTypeToProjectTable: add CardType column, setting existing rows to CardTypeTextOnly +func AddCardTypeToProjectTable(x *xorm.Engine) error { + type Project struct { + CardType int `xorm:"NOT NULL DEFAULT 0"` } - if setting.Database.Type.IsMySQL() { - if _, err := sess.Exec("ALTER TABLE `gpg_key_import` CHANGE `content` `content` MEDIUMTEXT"); err != nil { - return err - } - } - return sess.Commit() + return x.Sync(new(Project)) } diff --git a/models/migrations/v1_19/v243.go b/models/migrations/v1_19/v243.go index 55bbfafb2fa4a..4470835214f34 100644 --- a/models/migrations/v1_19/v243.go +++ b/models/migrations/v1_19/v243.go @@ -4,13 +4,23 @@ package v1_19 //nolint import ( + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func AddExclusiveLabel(x *xorm.Engine) error { - type Label struct { - Exclusive bool +// AlterPublicGPGKeyImportContentFieldToMediumText: set GPGKeyImport Content field to MEDIUMTEXT +func AlterPublicGPGKeyImportContentFieldToMediumText(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err } - return x.Sync(new(Label)) + if setting.Database.Type.IsMySQL() { + if _, err := sess.Exec("ALTER TABLE `gpg_key_import` CHANGE `content` `content` MEDIUMTEXT"); err != nil { + return err + } + } + return sess.Commit() } diff --git a/models/migrations/v1_19/v244.go b/models/migrations/v1_19/v244.go new file mode 100644 index 0000000000000..55bbfafb2fa4a --- /dev/null +++ b/models/migrations/v1_19/v244.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_19 //nolint + +import ( + "xorm.io/xorm" +) + +func AddExclusiveLabel(x *xorm.Engine) error { + type Label struct { + Exclusive bool + } + + return x.Sync(new(Label)) +} diff --git a/models/migrations/v1_20/v244.go b/models/migrations/v1_20/v244.go deleted file mode 100644 index 977566ad7dcd2..0000000000000 --- a/models/migrations/v1_20/v244.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2023 The Gitea Authors. All rights reserved. 
-// SPDX-License-Identifier: MIT - -package v1_20 //nolint - -import ( - "xorm.io/xorm" -) - -func AddNeedApprovalToActionRun(x *xorm.Engine) error { - /* - New index: TriggerUserID - New fields: NeedApproval, ApprovedBy - */ - type ActionRun struct { - TriggerUserID int64 `xorm:"index"` - NeedApproval bool // may need approval if it's a fork pull request - ApprovedBy int64 `xorm:"index"` // who approved - } - - return x.Sync(new(ActionRun)) -} diff --git a/models/migrations/v1_20/v245.go b/models/migrations/v1_20/v245.go index 466f21c239d09..977566ad7dcd2 100644 --- a/models/migrations/v1_20/v245.go +++ b/models/migrations/v1_20/v245.go @@ -4,71 +4,19 @@ package v1_20 //nolint import ( - "context" - "fmt" - - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/setting" - "xorm.io/xorm" ) -func RenameWebhookOrgToOwner(x *xorm.Engine) error { - type Webhook struct { - OrgID int64 `xorm:"INDEX"` - } - - // This migration maybe rerun so that we should check if it has been run - ownerExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webhook", "owner_id") - if err != nil { - return err - } - - if ownerExist { - orgExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webhook", "org_id") - if err != nil { - return err - } - if !orgExist { - return nil - } - } - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Sync2(new(Webhook)); err != nil { - return err - } - - if ownerExist { - if err := base.DropTableColumns(sess, "webhook", "owner_id"); err != nil { - return err - } - } - - switch { - case setting.Database.Type.IsMySQL(): - inferredTable, err := x.TableInfo(new(Webhook)) - if err != nil { - return err - } - sqlType := x.Dialect().SQLType(inferredTable.GetColumn("org_id")) - if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `webhook` CHANGE org_id owner_id %s", sqlType)); err != nil { - return err - } - case setting.Database.Type.IsMSSQL(): - if _, err := sess.Exec("sp_rename 'webhook.org_id', 'owner_id', 'COLUMN'"); err != nil { - return err - } - default: - if _, err := sess.Exec("ALTER TABLE `webhook` RENAME COLUMN org_id TO owner_id"); err != nil { - return err - } +func AddNeedApprovalToActionRun(x *xorm.Engine) error { + /* + New index: TriggerUserID + New fields: NeedApproval, ApprovedBy + */ + type ActionRun struct { + TriggerUserID int64 `xorm:"index"` + NeedApproval bool // may need approval if it's a fork pull request + ApprovedBy int64 `xorm:"index"` // who approved } - return sess.Commit() + return x.Sync(new(ActionRun)) } diff --git a/models/migrations/v1_20/v246.go b/models/migrations/v1_20/v246.go index e6340ef079d68..466f21c239d09 100644 --- a/models/migrations/v1_20/v246.go +++ b/models/migrations/v1_20/v246.go @@ -4,13 +4,71 @@ package v1_20 //nolint import ( + "context" + "fmt" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/setting" + "xorm.io/xorm" ) -func AddNewColumnForProject(x *xorm.Engine) error { - type Project struct { - OwnerID int64 `xorm:"INDEX"` +func RenameWebhookOrgToOwner(x *xorm.Engine) error { + type Webhook struct { + OrgID int64 `xorm:"INDEX"` + } + + // This migration maybe rerun so that we should check if it has been run + ownerExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webhook", "owner_id") + if err != nil { + return err + } + + if ownerExist { + orgExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "webhook", "org_id") + if err != nil { + 
return err + } + if !orgExist { + return nil + } + } + + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + + if err := sess.Sync2(new(Webhook)); err != nil { + return err + } + + if ownerExist { + if err := base.DropTableColumns(sess, "webhook", "owner_id"); err != nil { + return err + } + } + + switch { + case setting.Database.Type.IsMySQL(): + inferredTable, err := x.TableInfo(new(Webhook)) + if err != nil { + return err + } + sqlType := x.Dialect().SQLType(inferredTable.GetColumn("org_id")) + if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `webhook` CHANGE org_id owner_id %s", sqlType)); err != nil { + return err + } + case setting.Database.Type.IsMSSQL(): + if _, err := sess.Exec("sp_rename 'webhook.org_id', 'owner_id', 'COLUMN'"); err != nil { + return err + } + default: + if _, err := sess.Exec("ALTER TABLE `webhook` RENAME COLUMN org_id TO owner_id"); err != nil { + return err + } } - return x.Sync(new(Project)) + return sess.Commit() } diff --git a/models/migrations/v1_20/v247.go b/models/migrations/v1_20/v247.go index 59fc5c46b5dbc..e6340ef079d68 100644 --- a/models/migrations/v1_20/v247.go +++ b/models/migrations/v1_20/v247.go @@ -4,47 +4,13 @@ package v1_20 //nolint import ( - "code.gitea.io/gitea/modules/log" - "xorm.io/xorm" ) -// FixIncorrectProjectType: set individual project's type from 3(TypeOrganization) to 1(TypeIndividual) -func FixIncorrectProjectType(x *xorm.Engine) error { - type User struct { - ID int64 `xorm:"pk autoincr"` - Type int - } - - const ( - UserTypeIndividual int = 0 - - TypeIndividual uint8 = 1 - TypeOrganization uint8 = 3 - ) - +func AddNewColumnForProject(x *xorm.Engine) error { type Project struct { OwnerID int64 `xorm:"INDEX"` - Type uint8 - Owner *User `xorm:"extends"` - } - - sess := x.NewSession() - defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - count, err := sess.Table("project"). - Where("type = ? AND owner_id IN (SELECT id FROM `user` WHERE type = ?)", TypeOrganization, UserTypeIndividual). - Update(&Project{ - Type: TypeIndividual, - }) - if err != nil { - return err } - log.Debug("Updated %d projects to belong to a user instead of an organization", count) - return sess.Commit() + return x.Sync(new(Project)) } diff --git a/models/migrations/v1_20/v248.go b/models/migrations/v1_20/v248.go index 40555210e7e0b..59fc5c46b5dbc 100644 --- a/models/migrations/v1_20/v248.go +++ b/models/migrations/v1_20/v248.go @@ -3,12 +3,48 @@ package v1_20 //nolint -import "xorm.io/xorm" +import ( + "code.gitea.io/gitea/modules/log" -func AddVersionToActionRunner(x *xorm.Engine) error { - type ActionRunner struct { - Version string `xorm:"VARCHAR(64)"` // the version of act_runner + "xorm.io/xorm" +) + +// FixIncorrectProjectType: set individual project's type from 3(TypeOrganization) to 1(TypeIndividual) +func FixIncorrectProjectType(x *xorm.Engine) error { + type User struct { + ID int64 `xorm:"pk autoincr"` + Type int + } + + const ( + UserTypeIndividual int = 0 + + TypeIndividual uint8 = 1 + TypeOrganization uint8 = 3 + ) + + type Project struct { + OwnerID int64 `xorm:"INDEX"` + Type uint8 + Owner *User `xorm:"extends"` + } + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + count, err := sess.Table("project"). + Where("type = ? AND owner_id IN (SELECT id FROM `user` WHERE type = ?)", TypeOrganization, UserTypeIndividual). 
+ Update(&Project{ + Type: TypeIndividual, + }) + if err != nil { + return err } + log.Debug("Updated %d projects to belong to a user instead of an organization", count) - return x.Sync(new(ActionRunner)) + return sess.Commit() } diff --git a/models/migrations/v1_20/v249.go b/models/migrations/v1_20/v249.go index 02951a74d6d1c..40555210e7e0b 100644 --- a/models/migrations/v1_20/v249.go +++ b/models/migrations/v1_20/v249.go @@ -3,43 +3,12 @@ package v1_20 //nolint -import ( - "code.gitea.io/gitea/modules/timeutil" +import "xorm.io/xorm" - "xorm.io/xorm" - "xorm.io/xorm/schemas" -) +func AddVersionToActionRunner(x *xorm.Engine) error { + type ActionRunner struct { + Version string `xorm:"VARCHAR(64)"` // the version of act_runner + } -type Action struct { - UserID int64 // Receiver user id. - ActUserID int64 // Action user id. - RepoID int64 - IsDeleted bool `xorm:"NOT NULL DEFAULT false"` - IsPrivate bool `xorm:"NOT NULL DEFAULT false"` - CreatedUnix timeutil.TimeStamp `xorm:"created"` -} - -// TableName sets the name of this table -func (a *Action) TableName() string { - return "action" -} - -// TableIndices implements xorm's TableIndices interface -func (a *Action) TableIndices() []*schemas.Index { - repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType) - repoIndex.AddColumn("repo_id", "user_id", "is_deleted") - - actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType) - actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted") - - cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType) - cudIndex.AddColumn("created_unix", "user_id", "is_deleted") - - indices := []*schemas.Index{actUserIndex, repoIndex, cudIndex} - - return indices -} - -func ImproveActionTableIndices(x *xorm.Engine) error { - return x.Sync(new(Action)) + return x.Sync(new(ActionRunner)) } diff --git a/models/migrations/v1_20/v250.go b/models/migrations/v1_20/v250.go index e05646e5c6dc5..02951a74d6d1c 100644 --- a/models/migrations/v1_20/v250.go +++ b/models/migrations/v1_20/v250.go @@ -4,132 +4,42 @@ package v1_20 //nolint import ( - "strings" - - "code.gitea.io/gitea/modules/json" + "code.gitea.io/gitea/modules/timeutil" "xorm.io/xorm" + "xorm.io/xorm/schemas" ) -func ChangeContainerMetadataMultiArch(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - type PackageVersion struct { - ID int64 `xorm:"pk"` - MetadataJSON string `xorm:"metadata_json"` - } - - type PackageBlob struct{} - - // Get all relevant packages (manifest list images have a container.manifest.reference property) - - var pvs []*PackageVersion - err := sess. - Table("package_version"). - Select("id, metadata_json"). - Where("id IN (SELECT DISTINCT ref_id FROM package_property WHERE ref_type = 0 AND name = 'container.manifest.reference')"). 
- Find(&pvs) - if err != nil { - return err - } - - type MetadataOld struct { - Type string `json:"type"` - IsTagged bool `json:"is_tagged"` - Platform string `json:"platform,omitempty"` - Description string `json:"description,omitempty"` - Authors []string `json:"authors,omitempty"` - Licenses string `json:"license,omitempty"` - ProjectURL string `json:"project_url,omitempty"` - RepositoryURL string `json:"repository_url,omitempty"` - DocumentationURL string `json:"documentation_url,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - ImageLayers []string `json:"layer_creation,omitempty"` - MultiArch map[string]string `json:"multiarch,omitempty"` - } - - type Manifest struct { - Platform string `json:"platform"` - Digest string `json:"digest"` - Size int64 `json:"size"` - } - - type MetadataNew struct { - Type string `json:"type"` - IsTagged bool `json:"is_tagged"` - Platform string `json:"platform,omitempty"` - Description string `json:"description,omitempty"` - Authors []string `json:"authors,omitempty"` - Licenses string `json:"license,omitempty"` - ProjectURL string `json:"project_url,omitempty"` - RepositoryURL string `json:"repository_url,omitempty"` - DocumentationURL string `json:"documentation_url,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - ImageLayers []string `json:"layer_creation,omitempty"` - Manifests []*Manifest `json:"manifests,omitempty"` - } - - for _, pv := range pvs { - var old *MetadataOld - if err := json.Unmarshal([]byte(pv.MetadataJSON), &old); err != nil { - return err - } - - // Calculate the size of every contained manifest - - manifests := make([]*Manifest, 0, len(old.MultiArch)) - for platform, digest := range old.MultiArch { - size, err := sess. - Table("package_blob"). - Join("INNER", "package_file", "package_blob.id = package_file.blob_id"). - Join("INNER", "package_version pv", "pv.id = package_file.version_id"). - Join("INNER", "package_version pv2", "pv2.package_id = pv.package_id"). - Where("pv.lower_version = ? AND pv2.id = ?", strings.ToLower(digest), pv.ID). - SumInt(new(PackageBlob), "size") - if err != nil { - return err - } +type Action struct { + UserID int64 // Receiver user id. + ActUserID int64 // Action user id. 
+ RepoID int64 + IsDeleted bool `xorm:"NOT NULL DEFAULT false"` + IsPrivate bool `xorm:"NOT NULL DEFAULT false"` + CreatedUnix timeutil.TimeStamp `xorm:"created"` +} - manifests = append(manifests, &Manifest{ - Platform: platform, - Digest: digest, - Size: size, - }) - } +// TableName sets the name of this table +func (a *Action) TableName() string { + return "action" +} - // Convert to new metadata format +// TableIndices implements xorm's TableIndices interface +func (a *Action) TableIndices() []*schemas.Index { + repoIndex := schemas.NewIndex("r_u_d", schemas.IndexType) + repoIndex.AddColumn("repo_id", "user_id", "is_deleted") - new := &MetadataNew{ - Type: old.Type, - IsTagged: old.IsTagged, - Platform: old.Platform, - Description: old.Description, - Authors: old.Authors, - Licenses: old.Licenses, - ProjectURL: old.ProjectURL, - RepositoryURL: old.RepositoryURL, - DocumentationURL: old.DocumentationURL, - Labels: old.Labels, - ImageLayers: old.ImageLayers, - Manifests: manifests, - } + actUserIndex := schemas.NewIndex("au_r_c_u_d", schemas.IndexType) + actUserIndex.AddColumn("act_user_id", "repo_id", "created_unix", "user_id", "is_deleted") - metadataJSON, err := json.Marshal(new) - if err != nil { - return err - } + cudIndex := schemas.NewIndex("c_u_d", schemas.IndexType) + cudIndex.AddColumn("created_unix", "user_id", "is_deleted") - pv.MetadataJSON = string(metadataJSON) + indices := []*schemas.Index{actUserIndex, repoIndex, cudIndex} - if _, err := sess.ID(pv.ID).Update(pv); err != nil { - return err - } - } + return indices +} - return sess.Commit() +func ImproveActionTableIndices(x *xorm.Engine) error { + return x.Sync(new(Action)) } diff --git a/models/migrations/v1_20/v251.go b/models/migrations/v1_20/v251.go index 7743248a3f17b..e05646e5c6dc5 100644 --- a/models/migrations/v1_20/v251.go +++ b/models/migrations/v1_20/v251.go @@ -4,28 +4,14 @@ package v1_20 //nolint import ( - "code.gitea.io/gitea/modules/log" + "strings" + + "code.gitea.io/gitea/modules/json" "xorm.io/xorm" ) -func FixIncorrectOwnerTeamUnitAccessMode(x *xorm.Engine) error { - type UnitType int - type AccessMode int - - type TeamUnit struct { - ID int64 `xorm:"pk autoincr"` - OrgID int64 `xorm:"INDEX"` - TeamID int64 `xorm:"UNIQUE(s)"` - Type UnitType `xorm:"UNIQUE(s)"` - AccessMode AccessMode - } - - const ( - // AccessModeOwner owner access - AccessModeOwner = 4 - ) - +func ChangeContainerMetadataMultiArch(x *xorm.Engine) error { sess := x.NewSession() defer sess.Close() @@ -33,15 +19,117 @@ func FixIncorrectOwnerTeamUnitAccessMode(x *xorm.Engine) error { return err } - count, err := sess.Table("team_unit"). - Where("team_id IN (SELECT id FROM team WHERE authorize = ?)", AccessModeOwner). - Update(&TeamUnit{ - AccessMode: AccessModeOwner, - }) + type PackageVersion struct { + ID int64 `xorm:"pk"` + MetadataJSON string `xorm:"metadata_json"` + } + + type PackageBlob struct{} + + // Get all relevant packages (manifest list images have a container.manifest.reference property) + + var pvs []*PackageVersion + err := sess. + Table("package_version"). + Select("id, metadata_json"). + Where("id IN (SELECT DISTINCT ref_id FROM package_property WHERE ref_type = 0 AND name = 'container.manifest.reference')"). 
+ Find(&pvs) if err != nil { return err } - log.Debug("Updated %d owner team unit access mode to belong to owner instead of none", count) + + type MetadataOld struct { + Type string `json:"type"` + IsTagged bool `json:"is_tagged"` + Platform string `json:"platform,omitempty"` + Description string `json:"description,omitempty"` + Authors []string `json:"authors,omitempty"` + Licenses string `json:"license,omitempty"` + ProjectURL string `json:"project_url,omitempty"` + RepositoryURL string `json:"repository_url,omitempty"` + DocumentationURL string `json:"documentation_url,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + ImageLayers []string `json:"layer_creation,omitempty"` + MultiArch map[string]string `json:"multiarch,omitempty"` + } + + type Manifest struct { + Platform string `json:"platform"` + Digest string `json:"digest"` + Size int64 `json:"size"` + } + + type MetadataNew struct { + Type string `json:"type"` + IsTagged bool `json:"is_tagged"` + Platform string `json:"platform,omitempty"` + Description string `json:"description,omitempty"` + Authors []string `json:"authors,omitempty"` + Licenses string `json:"license,omitempty"` + ProjectURL string `json:"project_url,omitempty"` + RepositoryURL string `json:"repository_url,omitempty"` + DocumentationURL string `json:"documentation_url,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + ImageLayers []string `json:"layer_creation,omitempty"` + Manifests []*Manifest `json:"manifests,omitempty"` + } + + for _, pv := range pvs { + var old *MetadataOld + if err := json.Unmarshal([]byte(pv.MetadataJSON), &old); err != nil { + return err + } + + // Calculate the size of every contained manifest + + manifests := make([]*Manifest, 0, len(old.MultiArch)) + for platform, digest := range old.MultiArch { + size, err := sess. + Table("package_blob"). + Join("INNER", "package_file", "package_blob.id = package_file.blob_id"). + Join("INNER", "package_version pv", "pv.id = package_file.version_id"). + Join("INNER", "package_version pv2", "pv2.package_id = pv.package_id"). + Where("pv.lower_version = ? AND pv2.id = ?", strings.ToLower(digest), pv.ID). 
+ SumInt(new(PackageBlob), "size") + if err != nil { + return err + } + + manifests = append(manifests, &Manifest{ + Platform: platform, + Digest: digest, + Size: size, + }) + } + + // Convert to new metadata format + + new := &MetadataNew{ + Type: old.Type, + IsTagged: old.IsTagged, + Platform: old.Platform, + Description: old.Description, + Authors: old.Authors, + Licenses: old.Licenses, + ProjectURL: old.ProjectURL, + RepositoryURL: old.RepositoryURL, + DocumentationURL: old.DocumentationURL, + Labels: old.Labels, + ImageLayers: old.ImageLayers, + Manifests: manifests, + } + + metadataJSON, err := json.Marshal(new) + if err != nil { + return err + } + + pv.MetadataJSON = string(metadataJSON) + + if _, err := sess.ID(pv.ID).Update(pv); err != nil { + return err + } + } return sess.Commit() } diff --git a/models/migrations/v1_20/v252.go b/models/migrations/v1_20/v252.go index ab61cd9b8b36e..7743248a3f17b 100644 --- a/models/migrations/v1_20/v252.go +++ b/models/migrations/v1_20/v252.go @@ -9,7 +9,7 @@ import ( "xorm.io/xorm" ) -func FixIncorrectAdminTeamUnitAccessMode(x *xorm.Engine) error { +func FixIncorrectOwnerTeamUnitAccessMode(x *xorm.Engine) error { type UnitType int type AccessMode int @@ -22,8 +22,8 @@ func FixIncorrectAdminTeamUnitAccessMode(x *xorm.Engine) error { } const ( - // AccessModeAdmin admin access - AccessModeAdmin = 3 + // AccessModeOwner owner access + AccessModeOwner = 4 ) sess := x.NewSession() @@ -34,14 +34,14 @@ func FixIncorrectAdminTeamUnitAccessMode(x *xorm.Engine) error { } count, err := sess.Table("team_unit"). - Where("team_id IN (SELECT id FROM team WHERE authorize = ?)", AccessModeAdmin). + Where("team_id IN (SELECT id FROM team WHERE authorize = ?)", AccessModeOwner). Update(&TeamUnit{ - AccessMode: AccessModeAdmin, + AccessMode: AccessModeOwner, }) if err != nil { return err } - log.Debug("Updated %d admin team unit access mode to belong to admin instead of none", count) + log.Debug("Updated %d owner team unit access mode to belong to owner instead of none", count) return sess.Commit() } diff --git a/models/migrations/v1_20/v253.go b/models/migrations/v1_20/v253.go index 96c494bd8d903..ab61cd9b8b36e 100644 --- a/models/migrations/v1_20/v253.go +++ b/models/migrations/v1_20/v253.go @@ -9,23 +9,21 @@ import ( "xorm.io/xorm" ) -func FixExternalTrackerAndExternalWikiAccessModeInOwnerAndAdminTeam(x *xorm.Engine) error { +func FixIncorrectAdminTeamUnitAccessMode(x *xorm.Engine) error { type UnitType int type AccessMode int type TeamUnit struct { ID int64 `xorm:"pk autoincr"` + OrgID int64 `xorm:"INDEX"` + TeamID int64 `xorm:"UNIQUE(s)"` Type UnitType `xorm:"UNIQUE(s)"` AccessMode AccessMode } const ( - // AccessModeRead read access - AccessModeRead = 1 - - // Unit Type - TypeExternalWiki = 6 - TypeExternalTracker = 7 + // AccessModeAdmin admin access + AccessModeAdmin = 3 ) sess := x.NewSession() @@ -36,14 +34,14 @@ func FixExternalTrackerAndExternalWikiAccessModeInOwnerAndAdminTeam(x *xorm.Engi } count, err := sess.Table("team_unit"). - Where("type IN (?, ?) AND access_mode > ?", TypeExternalWiki, TypeExternalTracker, AccessModeRead). + Where("team_id IN (SELECT id FROM team WHERE authorize = ?)", AccessModeAdmin). 
Update(&TeamUnit{ - AccessMode: AccessModeRead, + AccessMode: AccessModeAdmin, }) if err != nil { return err } - log.Debug("Updated %d ExternalTracker and ExternalWiki access mode to belong to owner and admin", count) + log.Debug("Updated %d admin team unit access mode to belong to admin instead of none", count) return sess.Commit() } diff --git a/models/migrations/v1_20/v254.go b/models/migrations/v1_20/v254.go index 1e26979a5b2a2..96c494bd8d903 100644 --- a/models/migrations/v1_20/v254.go +++ b/models/migrations/v1_20/v254.go @@ -4,15 +4,46 @@ package v1_20 //nolint import ( + "code.gitea.io/gitea/modules/log" + "xorm.io/xorm" ) -func AddActionTaskOutputTable(x *xorm.Engine) error { - type ActionTaskOutput struct { - ID int64 - TaskID int64 `xorm:"INDEX UNIQUE(task_id_output_key)"` - OutputKey string `xorm:"VARCHAR(255) UNIQUE(task_id_output_key)"` - OutputValue string `xorm:"MEDIUMTEXT"` +func FixExternalTrackerAndExternalWikiAccessModeInOwnerAndAdminTeam(x *xorm.Engine) error { + type UnitType int + type AccessMode int + + type TeamUnit struct { + ID int64 `xorm:"pk autoincr"` + Type UnitType `xorm:"UNIQUE(s)"` + AccessMode AccessMode + } + + const ( + // AccessModeRead read access + AccessModeRead = 1 + + // Unit Type + TypeExternalWiki = 6 + TypeExternalTracker = 7 + ) + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err } - return x.Sync(new(ActionTaskOutput)) + + count, err := sess.Table("team_unit"). + Where("type IN (?, ?) AND access_mode > ?", TypeExternalWiki, TypeExternalTracker, AccessModeRead). + Update(&TeamUnit{ + AccessMode: AccessModeRead, + }) + if err != nil { + return err + } + log.Debug("Updated %d ExternalTracker and ExternalWiki access mode to belong to owner and admin", count) + + return sess.Commit() } diff --git a/models/migrations/v1_20/v255.go b/models/migrations/v1_20/v255.go index 14b70f8f962f9..1e26979a5b2a2 100644 --- a/models/migrations/v1_20/v255.go +++ b/models/migrations/v1_20/v255.go @@ -4,20 +4,15 @@ package v1_20 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddArchivedUnixToRepository(x *xorm.Engine) error { - type Repository struct { - ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT 0"` - } - - if err := x.Sync(new(Repository)); err != nil { - return err +func AddActionTaskOutputTable(x *xorm.Engine) error { + type ActionTaskOutput struct { + ID int64 + TaskID int64 `xorm:"INDEX UNIQUE(task_id_output_key)"` + OutputKey string `xorm:"VARCHAR(255) UNIQUE(task_id_output_key)"` + OutputValue string `xorm:"MEDIUMTEXT"` } - - _, err := x.Exec("UPDATE repository SET archived_unix = updated_unix WHERE is_archived = ? 
AND archived_unix = 0", true) - return err + return x.Sync(new(ActionTaskOutput)) } diff --git a/models/migrations/v1_20/v256.go b/models/migrations/v1_20/v256.go index 822153b93e568..14b70f8f962f9 100644 --- a/models/migrations/v1_20/v256.go +++ b/models/migrations/v1_20/v256.go @@ -4,20 +4,20 @@ package v1_20 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddIsInternalColumnToPackage(x *xorm.Engine) error { - type Package struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` - RepoID int64 `xorm:"INDEX"` - Type string `xorm:"UNIQUE(s) INDEX NOT NULL"` - Name string `xorm:"NOT NULL"` - LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` - SemverCompatible bool `xorm:"NOT NULL DEFAULT false"` - IsInternal bool `xorm:"NOT NULL DEFAULT false"` +func AddArchivedUnixToRepository(x *xorm.Engine) error { + type Repository struct { + ArchivedUnix timeutil.TimeStamp `xorm:"DEFAULT 0"` + } + + if err := x.Sync(new(Repository)); err != nil { + return err } - return x.Sync(new(Package)) + _, err := x.Exec("UPDATE repository SET archived_unix = updated_unix WHERE is_archived = ? AND archived_unix = 0", true) + return err } diff --git a/models/migrations/v1_20/v257.go b/models/migrations/v1_20/v257.go index 6c6ca4c7486d0..822153b93e568 100644 --- a/models/migrations/v1_20/v257.go +++ b/models/migrations/v1_20/v257.go @@ -4,30 +4,20 @@ package v1_20 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func CreateActionArtifactTable(x *xorm.Engine) error { - // ActionArtifact is a file that is stored in the artifact storage. - type ActionArtifact struct { - ID int64 `xorm:"pk autoincr"` - RunID int64 `xorm:"index UNIQUE(runid_name)"` // The run id of the artifact - RunnerID int64 - RepoID int64 `xorm:"index"` - OwnerID int64 - CommitSHA string - StoragePath string // The path to the artifact in the storage - FileSize int64 // The size of the artifact in bytes - FileCompressedSize int64 // The size of the artifact in bytes after gzip compression - ContentEncoding string // The content encoding of the artifact - ArtifactPath string // The path to the artifact when runner uploads it - ArtifactName string `xorm:"UNIQUE(runid_name)"` // The name of the artifact when runner uploads it - Status int64 `xorm:"index"` // The status of the artifact - CreatedUnix timeutil.TimeStamp `xorm:"created"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated index"` +func AddIsInternalColumnToPackage(x *xorm.Engine) error { + type Package struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` + RepoID int64 `xorm:"INDEX"` + Type string `xorm:"UNIQUE(s) INDEX NOT NULL"` + Name string `xorm:"NOT NULL"` + LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` + SemverCompatible bool `xorm:"NOT NULL DEFAULT false"` + IsInternal bool `xorm:"NOT NULL DEFAULT false"` } - return x.Sync(new(ActionArtifact)) + return x.Sync(new(Package)) } diff --git a/models/migrations/v1_20/v258.go b/models/migrations/v1_20/v258.go index 47174ce8051a9..6c6ca4c7486d0 100644 --- a/models/migrations/v1_20/v258.go +++ b/models/migrations/v1_20/v258.go @@ -4,13 +4,30 @@ package v1_20 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddPinOrderToIssue(x *xorm.Engine) error { - type Issue struct { - PinOrder int `xorm:"DEFAULT 0"` +func CreateActionArtifactTable(x *xorm.Engine) error { + // ActionArtifact is a file that is stored in the artifact storage. 
+ type ActionArtifact struct { + ID int64 `xorm:"pk autoincr"` + RunID int64 `xorm:"index UNIQUE(runid_name)"` // The run id of the artifact + RunnerID int64 + RepoID int64 `xorm:"index"` + OwnerID int64 + CommitSHA string + StoragePath string // The path to the artifact in the storage + FileSize int64 // The size of the artifact in bytes + FileCompressedSize int64 // The size of the artifact in bytes after gzip compression + ContentEncoding string // The content encoding of the artifact + ArtifactPath string // The path to the artifact when runner uploads it + ArtifactName string `xorm:"UNIQUE(runid_name)"` // The name of the artifact when runner uploads it + Status int64 `xorm:"index"` // The status of the artifact + CreatedUnix timeutil.TimeStamp `xorm:"created"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated index"` } - return x.Sync(new(Issue)) + return x.Sync(new(ActionArtifact)) } diff --git a/models/migrations/v1_20/v259.go b/models/migrations/v1_20/v259.go index 5b8ced4ad7b41..47174ce8051a9 100644 --- a/models/migrations/v1_20/v259.go +++ b/models/migrations/v1_20/v259.go @@ -4,357 +4,13 @@ package v1_20 //nolint import ( - "fmt" - "strings" - - "code.gitea.io/gitea/modules/log" - "xorm.io/xorm" ) -// unknownAccessTokenScope represents the scope for an access token that isn't -// known be an old token or a new token. -type unknownAccessTokenScope string - -// AccessTokenScope represents the scope for an access token. -type AccessTokenScope string - -// for all categories, write implies read -const ( - AccessTokenScopeAll AccessTokenScope = "all" - AccessTokenScopePublicOnly AccessTokenScope = "public-only" // limited to public orgs/repos - - AccessTokenScopeReadActivityPub AccessTokenScope = "read:activitypub" - AccessTokenScopeWriteActivityPub AccessTokenScope = "write:activitypub" - - AccessTokenScopeReadAdmin AccessTokenScope = "read:admin" - AccessTokenScopeWriteAdmin AccessTokenScope = "write:admin" - - AccessTokenScopeReadMisc AccessTokenScope = "read:misc" - AccessTokenScopeWriteMisc AccessTokenScope = "write:misc" - - AccessTokenScopeReadNotification AccessTokenScope = "read:notification" - AccessTokenScopeWriteNotification AccessTokenScope = "write:notification" - - AccessTokenScopeReadOrganization AccessTokenScope = "read:organization" - AccessTokenScopeWriteOrganization AccessTokenScope = "write:organization" - - AccessTokenScopeReadPackage AccessTokenScope = "read:package" - AccessTokenScopeWritePackage AccessTokenScope = "write:package" - - AccessTokenScopeReadIssue AccessTokenScope = "read:issue" - AccessTokenScopeWriteIssue AccessTokenScope = "write:issue" - - AccessTokenScopeReadRepository AccessTokenScope = "read:repository" - AccessTokenScopeWriteRepository AccessTokenScope = "write:repository" - - AccessTokenScopeReadUser AccessTokenScope = "read:user" - AccessTokenScopeWriteUser AccessTokenScope = "write:user" -) - -// accessTokenScopeBitmap represents a bitmap of access token scopes. -type accessTokenScopeBitmap uint64 - -// Bitmap of each scope, including the child scopes. 
-const (
- // AccessTokenScopeAllBits is the bitmap of all access token scopes
- accessTokenScopeAllBits accessTokenScopeBitmap = accessTokenScopeWriteActivityPubBits |
- accessTokenScopeWriteAdminBits | accessTokenScopeWriteMiscBits | accessTokenScopeWriteNotificationBits |
- accessTokenScopeWriteOrganizationBits | accessTokenScopeWritePackageBits | accessTokenScopeWriteIssueBits |
- accessTokenScopeWriteRepositoryBits | accessTokenScopeWriteUserBits
-
- accessTokenScopePublicOnlyBits accessTokenScopeBitmap = 1 << iota
-
- accessTokenScopeReadActivityPubBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteActivityPubBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadActivityPubBits
-
- accessTokenScopeReadAdminBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteAdminBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadAdminBits
-
- accessTokenScopeReadMiscBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteMiscBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadMiscBits
-
- accessTokenScopeReadNotificationBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteNotificationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadNotificationBits
-
- accessTokenScopeReadOrganizationBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteOrganizationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadOrganizationBits
-
- accessTokenScopeReadPackageBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWritePackageBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadPackageBits
-
- accessTokenScopeReadIssueBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteIssueBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadIssueBits
-
- accessTokenScopeReadRepositoryBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteRepositoryBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadRepositoryBits
-
- accessTokenScopeReadUserBits accessTokenScopeBitmap = 1 << iota
- accessTokenScopeWriteUserBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadUserBits
-
- // The current implementation only supports up to 64 token scopes.
- // If we need to support > 64 scopes,
- // refactoring the whole implementation in this file (and only this file) is needed.
-)
-
-// allAccessTokenScopes contains all access token scopes.
-// The order is important: parent scope must precede child scopes.
-var allAccessTokenScopes = []AccessTokenScope{
- AccessTokenScopePublicOnly,
- AccessTokenScopeWriteActivityPub, AccessTokenScopeReadActivityPub,
- AccessTokenScopeWriteAdmin, AccessTokenScopeReadAdmin,
- AccessTokenScopeWriteMisc, AccessTokenScopeReadMisc,
- AccessTokenScopeWriteNotification, AccessTokenScopeReadNotification,
- AccessTokenScopeWriteOrganization, AccessTokenScopeReadOrganization,
- AccessTokenScopeWritePackage, AccessTokenScopeReadPackage,
- AccessTokenScopeWriteIssue, AccessTokenScopeReadIssue,
- AccessTokenScopeWriteRepository, AccessTokenScopeReadRepository,
- AccessTokenScopeWriteUser, AccessTokenScopeReadUser,
-}
-
-// allAccessTokenScopeBits contains all access token scopes.
-var allAccessTokenScopeBits = map[AccessTokenScope]accessTokenScopeBitmap{
- AccessTokenScopeAll: accessTokenScopeAllBits,
- AccessTokenScopePublicOnly: accessTokenScopePublicOnlyBits,
- AccessTokenScopeReadActivityPub: accessTokenScopeReadActivityPubBits,
- AccessTokenScopeWriteActivityPub: accessTokenScopeWriteActivityPubBits,
- AccessTokenScopeReadAdmin: accessTokenScopeReadAdminBits,
- AccessTokenScopeWriteAdmin: accessTokenScopeWriteAdminBits,
- AccessTokenScopeReadMisc: accessTokenScopeReadMiscBits,
- AccessTokenScopeWriteMisc: accessTokenScopeWriteMiscBits,
- AccessTokenScopeReadNotification: accessTokenScopeReadNotificationBits,
- AccessTokenScopeWriteNotification: accessTokenScopeWriteNotificationBits,
- AccessTokenScopeReadOrganization: accessTokenScopeReadOrganizationBits,
- AccessTokenScopeWriteOrganization: accessTokenScopeWriteOrganizationBits,
- AccessTokenScopeReadPackage: accessTokenScopeReadPackageBits,
- AccessTokenScopeWritePackage: accessTokenScopeWritePackageBits,
- AccessTokenScopeReadIssue: accessTokenScopeReadIssueBits,
- AccessTokenScopeWriteIssue: accessTokenScopeWriteIssueBits,
- AccessTokenScopeReadRepository: accessTokenScopeReadRepositoryBits,
- AccessTokenScopeWriteRepository: accessTokenScopeWriteRepositoryBits,
- AccessTokenScopeReadUser: accessTokenScopeReadUserBits,
- AccessTokenScopeWriteUser: accessTokenScopeWriteUserBits,
-}
-
-// hasScope returns true if the string has the given scope
-func (bitmap accessTokenScopeBitmap) hasScope(scope AccessTokenScope) (bool, error) {
- expectedBits, ok := allAccessTokenScopeBits[scope]
- if !ok {
- return false, fmt.Errorf("invalid access token scope: %s", scope)
- }
-
- return bitmap&expectedBits == expectedBits, nil
-}
-
-// toScope returns a normalized scope string without any duplicates.
-func (bitmap accessTokenScopeBitmap) toScope(unknownScopes *[]unknownAccessTokenScope) AccessTokenScope { - var scopes []string - - // Preserve unknown scopes, and put them at the beginning so that it's clear - // when debugging. - if unknownScopes != nil { - for _, unknownScope := range *unknownScopes { - scopes = append(scopes, string(unknownScope)) - } - } - - // iterate over all scopes, and reconstruct the bitmap - // if the reconstructed bitmap doesn't change, then the scope is already included - var reconstruct accessTokenScopeBitmap - - for _, singleScope := range allAccessTokenScopes { - // no need for error checking here, since we know the scope is valid - if ok, _ := bitmap.hasScope(singleScope); ok { - current := reconstruct | allAccessTokenScopeBits[singleScope] - if current == reconstruct { - continue - } - - reconstruct = current - scopes = append(scopes, string(singleScope)) - } - } - - scope := AccessTokenScope(strings.Join(scopes, ",")) - scope = AccessTokenScope(strings.ReplaceAll( - string(scope), - "write:activitypub,write:admin,write:misc,write:notification,write:organization,write:package,write:issue,write:repository,write:user", - "all", - )) - return scope -} - -// parse the scope string into a bitmap, thus removing possible duplicates. -func (s AccessTokenScope) parse() (accessTokenScopeBitmap, *[]unknownAccessTokenScope) { - var bitmap accessTokenScopeBitmap - var unknownScopes []unknownAccessTokenScope - - // The following is the more performant equivalent of 'for _, v := range strings.Split(remainingScope, ",")' as this is hot code - remainingScopes := string(s) - for len(remainingScopes) > 0 { - i := strings.IndexByte(remainingScopes, ',') - var v string - if i < 0 { - v = remainingScopes - remainingScopes = "" - } else if i+1 >= len(remainingScopes) { - v = remainingScopes[:i] - remainingScopes = "" - } else { - v = remainingScopes[:i] - remainingScopes = remainingScopes[i+1:] - } - singleScope := AccessTokenScope(v) - if singleScope == "" { - continue - } - if singleScope == AccessTokenScopeAll { - bitmap |= accessTokenScopeAllBits - continue - } - - bits, ok := allAccessTokenScopeBits[singleScope] - if !ok { - unknownScopes = append(unknownScopes, unknownAccessTokenScope(string(singleScope))) - } - bitmap |= bits - } - - return bitmap, &unknownScopes -} - -// NormalizePreservingUnknown returns a normalized scope string without any -// duplicates. Unknown scopes are included. -func (s AccessTokenScope) NormalizePreservingUnknown() AccessTokenScope { - bitmap, unknownScopes := s.parse() - - return bitmap.toScope(unknownScopes) -} - -// OldAccessTokenScope represents the scope for an access token. 
-type OldAccessTokenScope string - -const ( - OldAccessTokenScopeAll OldAccessTokenScope = "all" - - OldAccessTokenScopeRepo OldAccessTokenScope = "repo" - OldAccessTokenScopeRepoStatus OldAccessTokenScope = "repo:status" - OldAccessTokenScopePublicRepo OldAccessTokenScope = "public_repo" - - OldAccessTokenScopeAdminOrg OldAccessTokenScope = "admin:org" - OldAccessTokenScopeWriteOrg OldAccessTokenScope = "write:org" - OldAccessTokenScopeReadOrg OldAccessTokenScope = "read:org" - - OldAccessTokenScopeAdminPublicKey OldAccessTokenScope = "admin:public_key" - OldAccessTokenScopeWritePublicKey OldAccessTokenScope = "write:public_key" - OldAccessTokenScopeReadPublicKey OldAccessTokenScope = "read:public_key" - - OldAccessTokenScopeAdminRepoHook OldAccessTokenScope = "admin:repo_hook" - OldAccessTokenScopeWriteRepoHook OldAccessTokenScope = "write:repo_hook" - OldAccessTokenScopeReadRepoHook OldAccessTokenScope = "read:repo_hook" - - OldAccessTokenScopeAdminOrgHook OldAccessTokenScope = "admin:org_hook" - - OldAccessTokenScopeNotification OldAccessTokenScope = "notification" - - OldAccessTokenScopeUser OldAccessTokenScope = "user" - OldAccessTokenScopeReadUser OldAccessTokenScope = "read:user" - OldAccessTokenScopeUserEmail OldAccessTokenScope = "user:email" - OldAccessTokenScopeUserFollow OldAccessTokenScope = "user:follow" - - OldAccessTokenScopeDeleteRepo OldAccessTokenScope = "delete_repo" - - OldAccessTokenScopePackage OldAccessTokenScope = "package" - OldAccessTokenScopeWritePackage OldAccessTokenScope = "write:package" - OldAccessTokenScopeReadPackage OldAccessTokenScope = "read:package" - OldAccessTokenScopeDeletePackage OldAccessTokenScope = "delete:package" - - OldAccessTokenScopeAdminGPGKey OldAccessTokenScope = "admin:gpg_key" - OldAccessTokenScopeWriteGPGKey OldAccessTokenScope = "write:gpg_key" - OldAccessTokenScopeReadGPGKey OldAccessTokenScope = "read:gpg_key" - - OldAccessTokenScopeAdminApplication OldAccessTokenScope = "admin:application" - OldAccessTokenScopeWriteApplication OldAccessTokenScope = "write:application" - OldAccessTokenScopeReadApplication OldAccessTokenScope = "read:application" - - OldAccessTokenScopeSudo OldAccessTokenScope = "sudo" -) - -var accessTokenScopeMap = map[OldAccessTokenScope][]AccessTokenScope{ - OldAccessTokenScopeAll: {AccessTokenScopeAll}, - OldAccessTokenScopeRepo: {AccessTokenScopeWriteRepository}, - OldAccessTokenScopeRepoStatus: {AccessTokenScopeWriteRepository}, - OldAccessTokenScopePublicRepo: {AccessTokenScopePublicOnly, AccessTokenScopeWriteRepository}, - OldAccessTokenScopeAdminOrg: {AccessTokenScopeWriteOrganization}, - OldAccessTokenScopeWriteOrg: {AccessTokenScopeWriteOrganization}, - OldAccessTokenScopeReadOrg: {AccessTokenScopeReadOrganization}, - OldAccessTokenScopeAdminPublicKey: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeWritePublicKey: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeReadPublicKey: {AccessTokenScopeReadUser}, - OldAccessTokenScopeAdminRepoHook: {AccessTokenScopeWriteRepository}, - OldAccessTokenScopeWriteRepoHook: {AccessTokenScopeWriteRepository}, - OldAccessTokenScopeReadRepoHook: {AccessTokenScopeReadRepository}, - OldAccessTokenScopeAdminOrgHook: {AccessTokenScopeWriteOrganization}, - OldAccessTokenScopeNotification: {AccessTokenScopeWriteNotification}, - OldAccessTokenScopeUser: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeReadUser: {AccessTokenScopeReadUser}, - OldAccessTokenScopeUserEmail: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeUserFollow: {AccessTokenScopeWriteUser}, - 
OldAccessTokenScopeDeleteRepo: {AccessTokenScopeWriteRepository}, - OldAccessTokenScopePackage: {AccessTokenScopeWritePackage}, - OldAccessTokenScopeWritePackage: {AccessTokenScopeWritePackage}, - OldAccessTokenScopeReadPackage: {AccessTokenScopeReadPackage}, - OldAccessTokenScopeDeletePackage: {AccessTokenScopeWritePackage}, - OldAccessTokenScopeAdminGPGKey: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeWriteGPGKey: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeReadGPGKey: {AccessTokenScopeReadUser}, - OldAccessTokenScopeAdminApplication: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeWriteApplication: {AccessTokenScopeWriteUser}, - OldAccessTokenScopeReadApplication: {AccessTokenScopeReadUser}, - OldAccessTokenScopeSudo: {AccessTokenScopeWriteAdmin}, -} - -type AccessToken struct { - ID int64 `xorm:"pk autoincr"` - Scope string -} - -func ConvertScopedAccessTokens(x *xorm.Engine) error { - var tokens []*AccessToken - - if err := x.Find(&tokens); err != nil { - return err - } - - for _, token := range tokens { - var scopes []string - allNewScopesMap := make(map[AccessTokenScope]bool) - for _, oldScope := range strings.Split(token.Scope, ",") { - if newScopes, exists := accessTokenScopeMap[OldAccessTokenScope(oldScope)]; exists { - for _, newScope := range newScopes { - allNewScopesMap[newScope] = true - } - } else { - log.Debug("access token scope not recognized as old token scope %s; preserving it", oldScope) - scopes = append(scopes, oldScope) - } - } - - for s := range allNewScopesMap { - scopes = append(scopes, string(s)) - } - scope := AccessTokenScope(strings.Join(scopes, ",")) - - // normalize the scope - normScope := scope.NormalizePreservingUnknown() - - token.Scope = string(normScope) - - // update the db entry with the new scope - if _, err := x.Cols("scope").ID(token.ID).Update(token); err != nil { - return err - } +func AddPinOrderToIssue(x *xorm.Engine) error { + type Issue struct { + PinOrder int `xorm:"DEFAULT 0"` } - return nil + return x.Sync(new(Issue)) } diff --git a/models/migrations/v1_20/v260.go b/models/migrations/v1_20/v260.go new file mode 100644 index 0000000000000..5b8ced4ad7b41 --- /dev/null +++ b/models/migrations/v1_20/v260.go @@ -0,0 +1,360 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_20 //nolint + +import ( + "fmt" + "strings" + + "code.gitea.io/gitea/modules/log" + + "xorm.io/xorm" +) + +// unknownAccessTokenScope represents the scope for an access token that isn't +// known be an old token or a new token. +type unknownAccessTokenScope string + +// AccessTokenScope represents the scope for an access token. 
+type AccessTokenScope string
+
+// for all categories, write implies read
+const (
+ AccessTokenScopeAll AccessTokenScope = "all"
+ AccessTokenScopePublicOnly AccessTokenScope = "public-only" // limited to public orgs/repos
+
+ AccessTokenScopeReadActivityPub AccessTokenScope = "read:activitypub"
+ AccessTokenScopeWriteActivityPub AccessTokenScope = "write:activitypub"
+
+ AccessTokenScopeReadAdmin AccessTokenScope = "read:admin"
+ AccessTokenScopeWriteAdmin AccessTokenScope = "write:admin"
+
+ AccessTokenScopeReadMisc AccessTokenScope = "read:misc"
+ AccessTokenScopeWriteMisc AccessTokenScope = "write:misc"
+
+ AccessTokenScopeReadNotification AccessTokenScope = "read:notification"
+ AccessTokenScopeWriteNotification AccessTokenScope = "write:notification"
+
+ AccessTokenScopeReadOrganization AccessTokenScope = "read:organization"
+ AccessTokenScopeWriteOrganization AccessTokenScope = "write:organization"
+
+ AccessTokenScopeReadPackage AccessTokenScope = "read:package"
+ AccessTokenScopeWritePackage AccessTokenScope = "write:package"
+
+ AccessTokenScopeReadIssue AccessTokenScope = "read:issue"
+ AccessTokenScopeWriteIssue AccessTokenScope = "write:issue"
+
+ AccessTokenScopeReadRepository AccessTokenScope = "read:repository"
+ AccessTokenScopeWriteRepository AccessTokenScope = "write:repository"
+
+ AccessTokenScopeReadUser AccessTokenScope = "read:user"
+ AccessTokenScopeWriteUser AccessTokenScope = "write:user"
+)
+
+// accessTokenScopeBitmap represents a bitmap of access token scopes.
+type accessTokenScopeBitmap uint64
+
+// Bitmap of each scope, including the child scopes.
+const (
+ // AccessTokenScopeAllBits is the bitmap of all access token scopes
+ accessTokenScopeAllBits accessTokenScopeBitmap = accessTokenScopeWriteActivityPubBits |
+ accessTokenScopeWriteAdminBits | accessTokenScopeWriteMiscBits | accessTokenScopeWriteNotificationBits |
+ accessTokenScopeWriteOrganizationBits | accessTokenScopeWritePackageBits | accessTokenScopeWriteIssueBits |
+ accessTokenScopeWriteRepositoryBits | accessTokenScopeWriteUserBits
+
+ accessTokenScopePublicOnlyBits accessTokenScopeBitmap = 1 << iota
+
+ accessTokenScopeReadActivityPubBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteActivityPubBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadActivityPubBits
+
+ accessTokenScopeReadAdminBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteAdminBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadAdminBits
+
+ accessTokenScopeReadMiscBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteMiscBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadMiscBits
+
+ accessTokenScopeReadNotificationBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteNotificationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadNotificationBits
+
+ accessTokenScopeReadOrganizationBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteOrganizationBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadOrganizationBits
+
+ accessTokenScopeReadPackageBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWritePackageBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadPackageBits
+
+ accessTokenScopeReadIssueBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteIssueBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadIssueBits
+
+ accessTokenScopeReadRepositoryBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteRepositoryBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadRepositoryBits
+
+ accessTokenScopeReadUserBits accessTokenScopeBitmap = 1 << iota
+ accessTokenScopeWriteUserBits accessTokenScopeBitmap = 1<<iota | accessTokenScopeReadUserBits
+
+ // The current implementation only supports up to 64 token scopes.
+ // If we need to support > 64 scopes,
+ // refactoring the whole implementation in this file (and only this file) is needed.
+)
+
+// allAccessTokenScopes contains all access token scopes.
+// The order is important: parent scope must precede child scopes.
+var allAccessTokenScopes = []AccessTokenScope{
+ AccessTokenScopePublicOnly,
+ AccessTokenScopeWriteActivityPub, AccessTokenScopeReadActivityPub,
+ AccessTokenScopeWriteAdmin, AccessTokenScopeReadAdmin,
+ AccessTokenScopeWriteMisc, AccessTokenScopeReadMisc,
+ AccessTokenScopeWriteNotification, AccessTokenScopeReadNotification,
+ AccessTokenScopeWriteOrganization, AccessTokenScopeReadOrganization,
+ AccessTokenScopeWritePackage, AccessTokenScopeReadPackage,
+ AccessTokenScopeWriteIssue, AccessTokenScopeReadIssue,
+ AccessTokenScopeWriteRepository, AccessTokenScopeReadRepository,
+ AccessTokenScopeWriteUser, AccessTokenScopeReadUser,
+}
+
+// allAccessTokenScopeBits contains all access token scopes.
+var allAccessTokenScopeBits = map[AccessTokenScope]accessTokenScopeBitmap{ + AccessTokenScopeAll: accessTokenScopeAllBits, + AccessTokenScopePublicOnly: accessTokenScopePublicOnlyBits, + AccessTokenScopeReadActivityPub: accessTokenScopeReadActivityPubBits, + AccessTokenScopeWriteActivityPub: accessTokenScopeWriteActivityPubBits, + AccessTokenScopeReadAdmin: accessTokenScopeReadAdminBits, + AccessTokenScopeWriteAdmin: accessTokenScopeWriteAdminBits, + AccessTokenScopeReadMisc: accessTokenScopeReadMiscBits, + AccessTokenScopeWriteMisc: accessTokenScopeWriteMiscBits, + AccessTokenScopeReadNotification: accessTokenScopeReadNotificationBits, + AccessTokenScopeWriteNotification: accessTokenScopeWriteNotificationBits, + AccessTokenScopeReadOrganization: accessTokenScopeReadOrganizationBits, + AccessTokenScopeWriteOrganization: accessTokenScopeWriteOrganizationBits, + AccessTokenScopeReadPackage: accessTokenScopeReadPackageBits, + AccessTokenScopeWritePackage: accessTokenScopeWritePackageBits, + AccessTokenScopeReadIssue: accessTokenScopeReadIssueBits, + AccessTokenScopeWriteIssue: accessTokenScopeWriteIssueBits, + AccessTokenScopeReadRepository: accessTokenScopeReadRepositoryBits, + AccessTokenScopeWriteRepository: accessTokenScopeWriteRepositoryBits, + AccessTokenScopeReadUser: accessTokenScopeReadUserBits, + AccessTokenScopeWriteUser: accessTokenScopeWriteUserBits, +} + +// hasScope returns true if the string has the given scope +func (bitmap accessTokenScopeBitmap) hasScope(scope AccessTokenScope) (bool, error) { + expectedBits, ok := allAccessTokenScopeBits[scope] + if !ok { + return false, fmt.Errorf("invalid access token scope: %s", scope) + } + + return bitmap&expectedBits == expectedBits, nil +} + +// toScope returns a normalized scope string without any duplicates. +func (bitmap accessTokenScopeBitmap) toScope(unknownScopes *[]unknownAccessTokenScope) AccessTokenScope { + var scopes []string + + // Preserve unknown scopes, and put them at the beginning so that it's clear + // when debugging. + if unknownScopes != nil { + for _, unknownScope := range *unknownScopes { + scopes = append(scopes, string(unknownScope)) + } + } + + // iterate over all scopes, and reconstruct the bitmap + // if the reconstructed bitmap doesn't change, then the scope is already included + var reconstruct accessTokenScopeBitmap + + for _, singleScope := range allAccessTokenScopes { + // no need for error checking here, since we know the scope is valid + if ok, _ := bitmap.hasScope(singleScope); ok { + current := reconstruct | allAccessTokenScopeBits[singleScope] + if current == reconstruct { + continue + } + + reconstruct = current + scopes = append(scopes, string(singleScope)) + } + } + + scope := AccessTokenScope(strings.Join(scopes, ",")) + scope = AccessTokenScope(strings.ReplaceAll( + string(scope), + "write:activitypub,write:admin,write:misc,write:notification,write:organization,write:package,write:issue,write:repository,write:user", + "all", + )) + return scope +} + +// parse the scope string into a bitmap, thus removing possible duplicates. 
+func (s AccessTokenScope) parse() (accessTokenScopeBitmap, *[]unknownAccessTokenScope) { + var bitmap accessTokenScopeBitmap + var unknownScopes []unknownAccessTokenScope + + // The following is the more performant equivalent of 'for _, v := range strings.Split(remainingScope, ",")' as this is hot code + remainingScopes := string(s) + for len(remainingScopes) > 0 { + i := strings.IndexByte(remainingScopes, ',') + var v string + if i < 0 { + v = remainingScopes + remainingScopes = "" + } else if i+1 >= len(remainingScopes) { + v = remainingScopes[:i] + remainingScopes = "" + } else { + v = remainingScopes[:i] + remainingScopes = remainingScopes[i+1:] + } + singleScope := AccessTokenScope(v) + if singleScope == "" { + continue + } + if singleScope == AccessTokenScopeAll { + bitmap |= accessTokenScopeAllBits + continue + } + + bits, ok := allAccessTokenScopeBits[singleScope] + if !ok { + unknownScopes = append(unknownScopes, unknownAccessTokenScope(string(singleScope))) + } + bitmap |= bits + } + + return bitmap, &unknownScopes +} + +// NormalizePreservingUnknown returns a normalized scope string without any +// duplicates. Unknown scopes are included. +func (s AccessTokenScope) NormalizePreservingUnknown() AccessTokenScope { + bitmap, unknownScopes := s.parse() + + return bitmap.toScope(unknownScopes) +} + +// OldAccessTokenScope represents the scope for an access token. +type OldAccessTokenScope string + +const ( + OldAccessTokenScopeAll OldAccessTokenScope = "all" + + OldAccessTokenScopeRepo OldAccessTokenScope = "repo" + OldAccessTokenScopeRepoStatus OldAccessTokenScope = "repo:status" + OldAccessTokenScopePublicRepo OldAccessTokenScope = "public_repo" + + OldAccessTokenScopeAdminOrg OldAccessTokenScope = "admin:org" + OldAccessTokenScopeWriteOrg OldAccessTokenScope = "write:org" + OldAccessTokenScopeReadOrg OldAccessTokenScope = "read:org" + + OldAccessTokenScopeAdminPublicKey OldAccessTokenScope = "admin:public_key" + OldAccessTokenScopeWritePublicKey OldAccessTokenScope = "write:public_key" + OldAccessTokenScopeReadPublicKey OldAccessTokenScope = "read:public_key" + + OldAccessTokenScopeAdminRepoHook OldAccessTokenScope = "admin:repo_hook" + OldAccessTokenScopeWriteRepoHook OldAccessTokenScope = "write:repo_hook" + OldAccessTokenScopeReadRepoHook OldAccessTokenScope = "read:repo_hook" + + OldAccessTokenScopeAdminOrgHook OldAccessTokenScope = "admin:org_hook" + + OldAccessTokenScopeNotification OldAccessTokenScope = "notification" + + OldAccessTokenScopeUser OldAccessTokenScope = "user" + OldAccessTokenScopeReadUser OldAccessTokenScope = "read:user" + OldAccessTokenScopeUserEmail OldAccessTokenScope = "user:email" + OldAccessTokenScopeUserFollow OldAccessTokenScope = "user:follow" + + OldAccessTokenScopeDeleteRepo OldAccessTokenScope = "delete_repo" + + OldAccessTokenScopePackage OldAccessTokenScope = "package" + OldAccessTokenScopeWritePackage OldAccessTokenScope = "write:package" + OldAccessTokenScopeReadPackage OldAccessTokenScope = "read:package" + OldAccessTokenScopeDeletePackage OldAccessTokenScope = "delete:package" + + OldAccessTokenScopeAdminGPGKey OldAccessTokenScope = "admin:gpg_key" + OldAccessTokenScopeWriteGPGKey OldAccessTokenScope = "write:gpg_key" + OldAccessTokenScopeReadGPGKey OldAccessTokenScope = "read:gpg_key" + + OldAccessTokenScopeAdminApplication OldAccessTokenScope = "admin:application" + OldAccessTokenScopeWriteApplication OldAccessTokenScope = "write:application" + OldAccessTokenScopeReadApplication OldAccessTokenScope = "read:application" + + 
OldAccessTokenScopeSudo OldAccessTokenScope = "sudo" +) + +var accessTokenScopeMap = map[OldAccessTokenScope][]AccessTokenScope{ + OldAccessTokenScopeAll: {AccessTokenScopeAll}, + OldAccessTokenScopeRepo: {AccessTokenScopeWriteRepository}, + OldAccessTokenScopeRepoStatus: {AccessTokenScopeWriteRepository}, + OldAccessTokenScopePublicRepo: {AccessTokenScopePublicOnly, AccessTokenScopeWriteRepository}, + OldAccessTokenScopeAdminOrg: {AccessTokenScopeWriteOrganization}, + OldAccessTokenScopeWriteOrg: {AccessTokenScopeWriteOrganization}, + OldAccessTokenScopeReadOrg: {AccessTokenScopeReadOrganization}, + OldAccessTokenScopeAdminPublicKey: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeWritePublicKey: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeReadPublicKey: {AccessTokenScopeReadUser}, + OldAccessTokenScopeAdminRepoHook: {AccessTokenScopeWriteRepository}, + OldAccessTokenScopeWriteRepoHook: {AccessTokenScopeWriteRepository}, + OldAccessTokenScopeReadRepoHook: {AccessTokenScopeReadRepository}, + OldAccessTokenScopeAdminOrgHook: {AccessTokenScopeWriteOrganization}, + OldAccessTokenScopeNotification: {AccessTokenScopeWriteNotification}, + OldAccessTokenScopeUser: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeReadUser: {AccessTokenScopeReadUser}, + OldAccessTokenScopeUserEmail: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeUserFollow: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeDeleteRepo: {AccessTokenScopeWriteRepository}, + OldAccessTokenScopePackage: {AccessTokenScopeWritePackage}, + OldAccessTokenScopeWritePackage: {AccessTokenScopeWritePackage}, + OldAccessTokenScopeReadPackage: {AccessTokenScopeReadPackage}, + OldAccessTokenScopeDeletePackage: {AccessTokenScopeWritePackage}, + OldAccessTokenScopeAdminGPGKey: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeWriteGPGKey: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeReadGPGKey: {AccessTokenScopeReadUser}, + OldAccessTokenScopeAdminApplication: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeWriteApplication: {AccessTokenScopeWriteUser}, + OldAccessTokenScopeReadApplication: {AccessTokenScopeReadUser}, + OldAccessTokenScopeSudo: {AccessTokenScopeWriteAdmin}, +} + +type AccessToken struct { + ID int64 `xorm:"pk autoincr"` + Scope string +} + +func ConvertScopedAccessTokens(x *xorm.Engine) error { + var tokens []*AccessToken + + if err := x.Find(&tokens); err != nil { + return err + } + + for _, token := range tokens { + var scopes []string + allNewScopesMap := make(map[AccessTokenScope]bool) + for _, oldScope := range strings.Split(token.Scope, ",") { + if newScopes, exists := accessTokenScopeMap[OldAccessTokenScope(oldScope)]; exists { + for _, newScope := range newScopes { + allNewScopesMap[newScope] = true + } + } else { + log.Debug("access token scope not recognized as old token scope %s; preserving it", oldScope) + scopes = append(scopes, oldScope) + } + } + + for s := range allNewScopesMap { + scopes = append(scopes, string(s)) + } + scope := AccessTokenScope(strings.Join(scopes, ",")) + + // normalize the scope + normScope := scope.NormalizePreservingUnknown() + + token.Scope = string(normScope) + + // update the db entry with the new scope + if _, err := x.Cols("scope").ID(token.ID).Update(token); err != nil { + return err + } + } + + return nil +} diff --git a/models/migrations/v1_20/v259_test.go b/models/migrations/v1_20/v260_test.go similarity index 100% rename from models/migrations/v1_20/v259_test.go rename to models/migrations/v1_20/v260_test.go diff --git a/models/migrations/v1_21/v260.go 
b/models/migrations/v1_21/v260.go deleted file mode 100644 index 6ca52c5998df7..0000000000000 --- a/models/migrations/v1_21/v260.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_21 //nolint - -import ( - "code.gitea.io/gitea/models/migrations/base" - - "xorm.io/xorm" -) - -func DropCustomLabelsColumnOfActionRunner(x *xorm.Engine) error { - sess := x.NewSession() - defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - // drop "custom_labels" cols - if err := base.DropTableColumns(sess, "action_runner", "custom_labels"); err != nil { - return err - } - - return sess.Commit() -} diff --git a/models/migrations/v1_21/v261.go b/models/migrations/v1_21/v261.go index 4ec1160d0b3eb..6ca52c5998df7 100644 --- a/models/migrations/v1_21/v261.go +++ b/models/migrations/v1_21/v261.go @@ -4,21 +4,23 @@ package v1_21 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/models/migrations/base" "xorm.io/xorm" ) -func CreateVariableTable(x *xorm.Engine) error { - type ActionVariable struct { - ID int64 `xorm:"pk autoincr"` - OwnerID int64 `xorm:"UNIQUE(owner_repo_name)"` - RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name)"` - Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"` - Data string `xorm:"LONGTEXT NOT NULL"` - CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated"` +func DropCustomLabelsColumnOfActionRunner(x *xorm.Engine) error { + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + // drop "custom_labels" cols + if err := base.DropTableColumns(sess, "action_runner", "custom_labels"); err != nil { + return err } - return x.Sync(new(ActionVariable)) + return sess.Commit() } diff --git a/models/migrations/v1_21/v262.go b/models/migrations/v1_21/v262.go index 23e900572a223..4ec1160d0b3eb 100644 --- a/models/migrations/v1_21/v262.go +++ b/models/migrations/v1_21/v262.go @@ -4,13 +4,21 @@ package v1_21 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddTriggerEventToActionRun(x *xorm.Engine) error { - type ActionRun struct { - TriggerEvent string +func CreateVariableTable(x *xorm.Engine) error { + type ActionVariable struct { + ID int64 `xorm:"pk autoincr"` + OwnerID int64 `xorm:"UNIQUE(owner_repo_name)"` + RepoID int64 `xorm:"INDEX UNIQUE(owner_repo_name)"` + Name string `xorm:"UNIQUE(owner_repo_name) NOT NULL"` + Data string `xorm:"LONGTEXT NOT NULL"` + CreatedUnix timeutil.TimeStamp `xorm:"created NOT NULL"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated"` } - return x.Sync(new(ActionRun)) + return x.Sync(new(ActionVariable)) } diff --git a/models/migrations/v1_21/v263.go b/models/migrations/v1_21/v263.go index 88a5cb92b49fb..23e900572a223 100644 --- a/models/migrations/v1_21/v263.go +++ b/models/migrations/v1_21/v263.go @@ -4,38 +4,13 @@ package v1_21 //nolint import ( - "fmt" - "xorm.io/xorm" ) -// AddGitSizeAndLFSSizeToRepositoryTable: add GitSize and LFSSize columns to Repository -func AddGitSizeAndLFSSizeToRepositoryTable(x *xorm.Engine) error { - type Repository struct { - GitSize int64 `xorm:"NOT NULL DEFAULT 0"` - LFSSize int64 `xorm:"NOT NULL DEFAULT 0"` - } - - sess := x.NewSession() - defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Sync2(new(Repository)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - - _, err := sess.Exec(`UPDATE repository SET 
lfs_size=(SELECT SUM(size) FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID) WHERE EXISTS (SELECT 1 FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID)`) - if err != nil { - return err - } - - _, err = sess.Exec(`UPDATE repository SET git_size = size - lfs_size`) - if err != nil { - return err +func AddTriggerEventToActionRun(x *xorm.Engine) error { + type ActionRun struct { + TriggerEvent string } - return sess.Commit() + return x.Sync(new(ActionRun)) } diff --git a/models/migrations/v1_21/v264.go b/models/migrations/v1_21/v264.go index 60b7a7acf7d0c..88a5cb92b49fb 100644 --- a/models/migrations/v1_21/v264.go +++ b/models/migrations/v1_21/v264.go @@ -4,90 +4,38 @@ package v1_21 //nolint import ( - "context" "fmt" - "code.gitea.io/gitea/models/db" - "code.gitea.io/gitea/modules/timeutil" - "xorm.io/xorm" ) -func AddBranchTable(x *xorm.Engine) error { - type Branch struct { - ID int64 - RepoID int64 `xorm:"UNIQUE(s)"` - Name string `xorm:"UNIQUE(s) NOT NULL"` - CommitID string - CommitMessage string `xorm:"TEXT"` - PusherID int64 - IsDeleted bool `xorm:"index"` - DeletedByID int64 - DeletedUnix timeutil.TimeStamp `xorm:"index"` - CommitTime timeutil.TimeStamp // The commit - CreatedUnix timeutil.TimeStamp `xorm:"created"` - UpdatedUnix timeutil.TimeStamp `xorm:"updated"` +// AddGitSizeAndLFSSizeToRepositoryTable: add GitSize and LFSSize columns to Repository +func AddGitSizeAndLFSSizeToRepositoryTable(x *xorm.Engine) error { + type Repository struct { + GitSize int64 `xorm:"NOT NULL DEFAULT 0"` + LFSSize int64 `xorm:"NOT NULL DEFAULT 0"` } - if err := x.Sync(new(Branch)); err != nil { - return err - } + sess := x.NewSession() + defer sess.Close() - if exist, err := x.IsTableExist("deleted_branches"); err != nil { + if err := sess.Begin(); err != nil { return err - } else if !exist { - return nil } - type DeletedBranch struct { - ID int64 - RepoID int64 `xorm:"index UNIQUE(s)"` - Name string `xorm:"UNIQUE(s) NOT NULL"` - Commit string - DeletedByID int64 - DeletedUnix timeutil.TimeStamp + if err := sess.Sync2(new(Repository)); err != nil { + return fmt.Errorf("Sync2: %w", err) } - var adminUserID int64 - has, err := x.Table("user"). - Select("id"). - Where("is_admin=?", true). - Asc("id"). // Reliably get the admin with the lowest ID. 
- Get(&adminUserID) + _, err := sess.Exec(`UPDATE repository SET lfs_size=(SELECT SUM(size) FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID) WHERE EXISTS (SELECT 1 FROM lfs_meta_object WHERE lfs_meta_object.repository_id=repository.ID)`) if err != nil { return err - } else if !has { - return fmt.Errorf("no admin user found") } - branches := make([]Branch, 0, 100) - if err := db.Iterate(context.Background(), nil, func(ctx context.Context, deletedBranch *DeletedBranch) error { - branches = append(branches, Branch{ - RepoID: deletedBranch.RepoID, - Name: deletedBranch.Name, - CommitID: deletedBranch.Commit, - PusherID: adminUserID, - IsDeleted: true, - DeletedByID: deletedBranch.DeletedByID, - DeletedUnix: deletedBranch.DeletedUnix, - }) - if len(branches) >= 100 { - _, err := x.Insert(&branches) - if err != nil { - return err - } - branches = branches[:0] - } - return nil - }); err != nil { + _, err = sess.Exec(`UPDATE repository SET git_size = size - lfs_size`) + if err != nil { return err } - if len(branches) > 0 { - if _, err := x.Insert(&branches); err != nil { - return err - } - } - - return x.DropTables("deleted_branches") + return sess.Commit() } diff --git a/models/migrations/v1_21/v265.go b/models/migrations/v1_21/v265.go new file mode 100644 index 0000000000000..60b7a7acf7d0c --- /dev/null +++ b/models/migrations/v1_21/v265.go @@ -0,0 +1,93 @@ +// Copyright 2023 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_21 //nolint + +import ( + "context" + "fmt" + + "code.gitea.io/gitea/models/db" + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +func AddBranchTable(x *xorm.Engine) error { + type Branch struct { + ID int64 + RepoID int64 `xorm:"UNIQUE(s)"` + Name string `xorm:"UNIQUE(s) NOT NULL"` + CommitID string + CommitMessage string `xorm:"TEXT"` + PusherID int64 + IsDeleted bool `xorm:"index"` + DeletedByID int64 + DeletedUnix timeutil.TimeStamp `xorm:"index"` + CommitTime timeutil.TimeStamp // The commit + CreatedUnix timeutil.TimeStamp `xorm:"created"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated"` + } + + if err := x.Sync(new(Branch)); err != nil { + return err + } + + if exist, err := x.IsTableExist("deleted_branches"); err != nil { + return err + } else if !exist { + return nil + } + + type DeletedBranch struct { + ID int64 + RepoID int64 `xorm:"index UNIQUE(s)"` + Name string `xorm:"UNIQUE(s) NOT NULL"` + Commit string + DeletedByID int64 + DeletedUnix timeutil.TimeStamp + } + + var adminUserID int64 + has, err := x.Table("user"). + Select("id"). + Where("is_admin=?", true). + Asc("id"). // Reliably get the admin with the lowest ID. 
+ Get(&adminUserID) + if err != nil { + return err + } else if !has { + return fmt.Errorf("no admin user found") + } + + branches := make([]Branch, 0, 100) + if err := db.Iterate(context.Background(), nil, func(ctx context.Context, deletedBranch *DeletedBranch) error { + branches = append(branches, Branch{ + RepoID: deletedBranch.RepoID, + Name: deletedBranch.Name, + CommitID: deletedBranch.Commit, + PusherID: adminUserID, + IsDeleted: true, + DeletedByID: deletedBranch.DeletedByID, + DeletedUnix: deletedBranch.DeletedUnix, + }) + if len(branches) >= 100 { + _, err := x.Insert(&branches) + if err != nil { + return err + } + branches = branches[:0] + } + return nil + }); err != nil { + return err + } + + if len(branches) > 0 { + if _, err := x.Insert(&branches); err != nil { + return err + } + } + + return x.DropTables("deleted_branches") +} diff --git a/models/migrations/v1_6/v70.go b/models/migrations/v1_6/v70.go deleted file mode 100644 index 74434a84a14f4..0000000000000 --- a/models/migrations/v1_6/v70.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2018 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_6 //nolint - -import ( - "fmt" - "time" - - "code.gitea.io/gitea/modules/setting" - - "xorm.io/xorm" -) - -func AddIssueDependencies(x *xorm.Engine) (err error) { - type IssueDependency struct { - ID int64 `xorm:"pk autoincr"` - UserID int64 `xorm:"NOT NULL"` - IssueID int64 `xorm:"NOT NULL"` - DependencyID int64 `xorm:"NOT NULL"` - Created time.Time `xorm:"-"` - CreatedUnix int64 `xorm:"created"` - Updated time.Time `xorm:"-"` - UpdatedUnix int64 `xorm:"updated"` - } - - const ( - v16UnitTypeCode = iota + 1 // 1 code - v16UnitTypeIssues // 2 issues - v16UnitTypePRs // 3 PRs - v16UnitTypeCommits // 4 Commits - v16UnitTypeReleases // 5 Releases - v16UnitTypeWiki // 6 Wiki - v16UnitTypeSettings // 7 Settings - v16UnitTypeExternalWiki // 8 ExternalWiki - v16UnitTypeExternalTracker // 9 ExternalTracker - ) - - if err = x.Sync(new(IssueDependency)); err != nil { - return fmt.Errorf("Error creating issue_dependency_table column definition: %w", err) - } - - // Update Comment definition - // This (copied) struct does only contain fields used by xorm as the only use here is to update the database - - // CommentType defines the comment type - type CommentType int - - // TimeStamp defines a timestamp - type TimeStamp int64 - - type Comment struct { - ID int64 `xorm:"pk autoincr"` - Type CommentType - PosterID int64 `xorm:"INDEX"` - IssueID int64 `xorm:"INDEX"` - LabelID int64 - OldMilestoneID int64 - MilestoneID int64 - OldAssigneeID int64 - AssigneeID int64 - OldTitle string - NewTitle string - DependentIssueID int64 - - CommitID int64 - Line int64 - Content string `xorm:"TEXT"` - - CreatedUnix TimeStamp `xorm:"INDEX created"` - UpdatedUnix TimeStamp `xorm:"INDEX updated"` - - // Reference issue in commit message - CommitSHA string `xorm:"VARCHAR(40)"` - } - - if err = x.Sync(new(Comment)); err != nil { - return fmt.Errorf("Error updating issue_comment table column definition: %w", err) - } - - // RepoUnit describes all units of a repository - type RepoUnit struct { - ID int64 - RepoID int64 `xorm:"INDEX(s)"` - Type int `xorm:"INDEX(s)"` - Config map[string]any `xorm:"JSON"` - CreatedUnix int64 `xorm:"INDEX CREATED"` - Created time.Time `xorm:"-"` - } - - // Updating existing issue units - units := make([]*RepoUnit, 0, 100) - err = x.Where("`type` = ?", v16UnitTypeIssues).Find(&units) - if err != nil { - return fmt.Errorf("Query repo units: %w", err) - } - 
for _, unit := range units { - if unit.Config == nil { - unit.Config = make(map[string]any) - } - if _, ok := unit.Config["EnableDependencies"]; !ok { - unit.Config["EnableDependencies"] = setting.Service.DefaultEnableDependencies - } - if _, err := x.ID(unit.ID).Cols("config").Update(unit); err != nil { - return err - } - } - - return err -} diff --git a/models/migrations/v1_6/v71.go b/models/migrations/v1_6/v71.go index effd2ebf99595..74434a84a14f4 100644 --- a/models/migrations/v1_6/v71.go +++ b/models/migrations/v1_6/v71.go @@ -5,76 +5,106 @@ package v1_6 //nolint import ( "fmt" + "time" - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/timeutil" - "code.gitea.io/gitea/modules/util" + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func AddScratchHash(x *xorm.Engine) error { - // TwoFactor see models/twofactor.go - type TwoFactor struct { - ID int64 `xorm:"pk autoincr"` - UID int64 `xorm:"UNIQUE"` - Secret string - ScratchToken string - ScratchSalt string - ScratchHash string - LastUsedPasscode string `xorm:"VARCHAR(10)"` - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +func AddIssueDependencies(x *xorm.Engine) (err error) { + type IssueDependency struct { + ID int64 `xorm:"pk autoincr"` + UserID int64 `xorm:"NOT NULL"` + IssueID int64 `xorm:"NOT NULL"` + DependencyID int64 `xorm:"NOT NULL"` + Created time.Time `xorm:"-"` + CreatedUnix int64 `xorm:"created"` + Updated time.Time `xorm:"-"` + UpdatedUnix int64 `xorm:"updated"` } - if err := x.Sync2(new(TwoFactor)); err != nil { - return fmt.Errorf("Sync2: %w", err) + const ( + v16UnitTypeCode = iota + 1 // 1 code + v16UnitTypeIssues // 2 issues + v16UnitTypePRs // 3 PRs + v16UnitTypeCommits // 4 Commits + v16UnitTypeReleases // 5 Releases + v16UnitTypeWiki // 6 Wiki + v16UnitTypeSettings // 7 Settings + v16UnitTypeExternalWiki // 8 ExternalWiki + v16UnitTypeExternalTracker // 9 ExternalTracker + ) + + if err = x.Sync(new(IssueDependency)); err != nil { + return fmt.Errorf("Error creating issue_dependency_table column definition: %w", err) } - sess := x.NewSession() - defer sess.Close() + // Update Comment definition + // This (copied) struct does only contain fields used by xorm as the only use here is to update the database - if err := sess.Begin(); err != nil { - return err - } + // CommentType defines the comment type + type CommentType int - // transform all tokens to hashes - const batchSize = 100 - for start := 0; ; start += batchSize { - tfas := make([]*TwoFactor, 0, batchSize) - if err := sess.Limit(batchSize, start).Find(&tfas); err != nil { - return err - } - if len(tfas) == 0 { - break - } + // TimeStamp defines a timestamp + type TimeStamp int64 - for _, tfa := range tfas { - // generate salt - salt, err := util.CryptoRandomString(10) - if err != nil { - return err - } - tfa.ScratchSalt = salt - tfa.ScratchHash = base.HashToken(tfa.ScratchToken, salt) + type Comment struct { + ID int64 `xorm:"pk autoincr"` + Type CommentType + PosterID int64 `xorm:"INDEX"` + IssueID int64 `xorm:"INDEX"` + LabelID int64 + OldMilestoneID int64 + MilestoneID int64 + OldAssigneeID int64 + AssigneeID int64 + OldTitle string + NewTitle string + DependentIssueID int64 - if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil { - return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err) - } + CommitID int64 + Line int64 + Content string `xorm:"TEXT"` - } + CreatedUnix TimeStamp `xorm:"INDEX created"` + 
UpdatedUnix TimeStamp `xorm:"INDEX updated"` + + // Reference issue in commit message + CommitSHA string `xorm:"VARCHAR(40)"` } - // Commit and begin new transaction for dropping columns - if err := sess.Commit(); err != nil { - return err + if err = x.Sync(new(Comment)); err != nil { + return fmt.Errorf("Error updating issue_comment table column definition: %w", err) } - if err := sess.Begin(); err != nil { - return err + + // RepoUnit describes all units of a repository + type RepoUnit struct { + ID int64 + RepoID int64 `xorm:"INDEX(s)"` + Type int `xorm:"INDEX(s)"` + Config map[string]any `xorm:"JSON"` + CreatedUnix int64 `xorm:"INDEX CREATED"` + Created time.Time `xorm:"-"` } - if err := base.DropTableColumns(sess, "two_factor", "scratch_token"); err != nil { - return err + // Updating existing issue units + units := make([]*RepoUnit, 0, 100) + err = x.Where("`type` = ?", v16UnitTypeIssues).Find(&units) + if err != nil { + return fmt.Errorf("Query repo units: %w", err) + } + for _, unit := range units { + if unit.Config == nil { + unit.Config = make(map[string]any) + } + if _, ok := unit.Config["EnableDependencies"]; !ok { + unit.Config["EnableDependencies"] = setting.Service.DefaultEnableDependencies + } + if _, err := x.ID(unit.ID).Cols("config").Update(unit); err != nil { + return err + } } - return sess.Commit() + + return err } diff --git a/models/migrations/v1_6/v72.go b/models/migrations/v1_6/v72.go index ce963eb371480..effd2ebf99595 100644 --- a/models/migrations/v1_6/v72.go +++ b/models/migrations/v1_6/v72.go @@ -6,25 +6,75 @@ package v1_6 //nolint import ( "fmt" + "code.gitea.io/gitea/models/migrations/base" "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/modules/util" "xorm.io/xorm" ) -func AddReview(x *xorm.Engine) error { - // Review see models/review.go - type Review struct { - ID int64 `xorm:"pk autoincr"` - Type string - ReviewerID int64 `xorm:"index"` - IssueID int64 `xorm:"index"` - Content string - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` +func AddScratchHash(x *xorm.Engine) error { + // TwoFactor see models/twofactor.go + type TwoFactor struct { + ID int64 `xorm:"pk autoincr"` + UID int64 `xorm:"UNIQUE"` + Secret string + ScratchToken string + ScratchSalt string + ScratchHash string + LastUsedPasscode string `xorm:"VARCHAR(10)"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` } - if err := x.Sync2(new(Review)); err != nil { + if err := x.Sync2(new(TwoFactor)); err != nil { return fmt.Errorf("Sync2: %w", err) } - return nil + + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + // transform all tokens to hashes + const batchSize = 100 + for start := 0; ; start += batchSize { + tfas := make([]*TwoFactor, 0, batchSize) + if err := sess.Limit(batchSize, start).Find(&tfas); err != nil { + return err + } + if len(tfas) == 0 { + break + } + + for _, tfa := range tfas { + // generate salt + salt, err := util.CryptoRandomString(10) + if err != nil { + return err + } + tfa.ScratchSalt = salt + tfa.ScratchHash = base.HashToken(tfa.ScratchToken, salt) + + if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil { + return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err) + } + + } + } + + // Commit and begin new transaction for dropping columns + if err := sess.Commit(); err != nil { + return err + } + if err := sess.Begin(); err 
!= nil { + return err + } + + if err := base.DropTableColumns(sess, "two_factor", "scratch_token"); err != nil { + return err + } + return sess.Commit() } diff --git a/models/migrations/v1_6/v73.go b/models/migrations/v1_6/v73.go new file mode 100644 index 0000000000000..ce963eb371480 --- /dev/null +++ b/models/migrations/v1_6/v73.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_6 //nolint + +import ( + "fmt" + + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +func AddReview(x *xorm.Engine) error { + // Review see models/review.go + type Review struct { + ID int64 `xorm:"pk autoincr"` + Type string + ReviewerID int64 `xorm:"index"` + IssueID int64 `xorm:"index"` + Content string + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + } + + if err := x.Sync2(new(Review)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + return nil +} diff --git a/models/migrations/v1_7/v73.go b/models/migrations/v1_7/v73.go deleted file mode 100644 index 1013daedbd635..0000000000000 --- a/models/migrations/v1_7/v73.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_7 //nolint - -import ( - "xorm.io/xorm" -) - -func AddMustChangePassword(x *xorm.Engine) error { - // User see models/user.go - type User struct { - ID int64 `xorm:"pk autoincr"` - MustChangePassword bool `xorm:"NOT NULL DEFAULT false"` - } - - return x.Sync2(new(User)) -} diff --git a/models/migrations/v1_7/v74.go b/models/migrations/v1_7/v74.go index bdd89d1f342a4..1013daedbd635 100644 --- a/models/migrations/v1_7/v74.go +++ b/models/migrations/v1_7/v74.go @@ -3,13 +3,16 @@ package v1_7 //nolint -import "xorm.io/xorm" +import ( + "xorm.io/xorm" +) -func AddApprovalWhitelistsToProtectedBranches(x *xorm.Engine) error { - type ProtectedBranch struct { - ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"` - ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"` - RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"` +func AddMustChangePassword(x *xorm.Engine) error { + // User see models/user.go + type User struct { + ID int64 `xorm:"pk autoincr"` + MustChangePassword bool `xorm:"NOT NULL DEFAULT false"` } - return x.Sync2(new(ProtectedBranch)) + + return x.Sync2(new(User)) } diff --git a/models/migrations/v1_7/v75.go b/models/migrations/v1_7/v75.go index fa7430970c91d..bdd89d1f342a4 100644 --- a/models/migrations/v1_7/v75.go +++ b/models/migrations/v1_7/v75.go @@ -3,30 +3,13 @@ package v1_7 //nolint -import ( - "xorm.io/builder" - "xorm.io/xorm" -) +import "xorm.io/xorm" -func ClearNonusedData(x *xorm.Engine) error { - condDelete := func(colName string) builder.Cond { - return builder.NotIn(colName, builder.Select("id").From("`user`")) +func AddApprovalWhitelistsToProtectedBranches(x *xorm.Engine) error { + type ProtectedBranch struct { + ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"` + ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"` + RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"` } - - if _, err := x.Exec(builder.Delete(condDelete("uid")).From("team_user")); err != nil { - return err - } - - if _, err := x.Exec(builder.Delete(condDelete("user_id")).From("collaboration")); err != nil { - return err - } - - if _, err := x.Exec(builder.Delete(condDelete("user_id")).From("stopwatch")); err != nil { - return err - } - - if _, err := 
x.Exec(builder.Delete(condDelete("owner_id")).From("gpg_key")); err != nil { - return err - } - return nil + return x.Sync2(new(ProtectedBranch)) } diff --git a/models/migrations/v1_7/v76.go b/models/migrations/v1_7/v76.go new file mode 100644 index 0000000000000..fa7430970c91d --- /dev/null +++ b/models/migrations/v1_7/v76.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_7 //nolint + +import ( + "xorm.io/builder" + "xorm.io/xorm" +) + +func ClearNonusedData(x *xorm.Engine) error { + condDelete := func(colName string) builder.Cond { + return builder.NotIn(colName, builder.Select("id").From("`user`")) + } + + if _, err := x.Exec(builder.Delete(condDelete("uid")).From("team_user")); err != nil { + return err + } + + if _, err := x.Exec(builder.Delete(condDelete("user_id")).From("collaboration")); err != nil { + return err + } + + if _, err := x.Exec(builder.Delete(condDelete("user_id")).From("stopwatch")); err != nil { + return err + } + + if _, err := x.Exec(builder.Delete(condDelete("owner_id")).From("gpg_key")); err != nil { + return err + } + return nil +} diff --git a/models/migrations/v1_8/v76.go b/models/migrations/v1_8/v76.go deleted file mode 100644 index d3fbd94deb104..0000000000000 --- a/models/migrations/v1_8/v76.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2018 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_8 //nolint - -import ( - "fmt" - - "code.gitea.io/gitea/modules/timeutil" - - "xorm.io/xorm" -) - -func AddPullRequestRebaseWithMerge(x *xorm.Engine) error { - // RepoUnit describes all units of a repository - type RepoUnit struct { - ID int64 - RepoID int64 `xorm:"INDEX(s)"` - Type int `xorm:"INDEX(s)"` - Config map[string]any `xorm:"JSON"` - CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"` - } - - const ( - v16UnitTypeCode = iota + 1 // 1 code - v16UnitTypeIssues // 2 issues - v16UnitTypePRs // 3 PRs - v16UnitTypeCommits // 4 Commits - v16UnitTypeReleases // 5 Releases - v16UnitTypeWiki // 6 Wiki - v16UnitTypeSettings // 7 Settings - v16UnitTypeExternalWiki // 8 ExternalWiki - v16UnitTypeExternalTracker // 9 ExternalTracker - ) - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - // Updating existing issue units - units := make([]*RepoUnit, 0, 100) - if err := sess.Where("`type` = ?", v16UnitTypePRs).Find(&units); err != nil { - return fmt.Errorf("Query repo units: %w", err) - } - for _, unit := range units { - if unit.Config == nil { - unit.Config = make(map[string]any) - } - // Allow the new merge style if all other merge styles are allowed - allowMergeRebase := true - - if allowMerge, ok := unit.Config["AllowMerge"]; ok { - allowMergeRebase = allowMergeRebase && allowMerge.(bool) - } - - if allowRebase, ok := unit.Config["AllowRebase"]; ok { - allowMergeRebase = allowMergeRebase && allowRebase.(bool) - } - - if allowSquash, ok := unit.Config["AllowSquash"]; ok { - allowMergeRebase = allowMergeRebase && allowSquash.(bool) - } - - if _, ok := unit.Config["AllowRebaseMerge"]; !ok { - unit.Config["AllowRebaseMerge"] = allowMergeRebase - } - if _, err := sess.ID(unit.ID).Cols("config").Update(unit); err != nil { - return err - } - } - return sess.Commit() -} diff --git a/models/migrations/v1_8/v77.go b/models/migrations/v1_8/v77.go index 2305984c129ed..d3fbd94deb104 100644 --- a/models/migrations/v1_8/v77.go +++ b/models/migrations/v1_8/v77.go @@ -1,16 +1,74 @@ -// Copyright 2019 The Gitea Authors. 
All rights reserved. +// Copyright 2018 The Gitea Authors. All rights reserved. // SPDX-License-Identifier: MIT package v1_8 //nolint import ( + "fmt" + + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddUserDefaultTheme(x *xorm.Engine) error { - type User struct { - Theme string `xorm:"VARCHAR(30) NOT NULL DEFAULT ''"` +func AddPullRequestRebaseWithMerge(x *xorm.Engine) error { + // RepoUnit describes all units of a repository + type RepoUnit struct { + ID int64 + RepoID int64 `xorm:"INDEX(s)"` + Type int `xorm:"INDEX(s)"` + Config map[string]any `xorm:"JSON"` + CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"` + } + + const ( + v16UnitTypeCode = iota + 1 // 1 code + v16UnitTypeIssues // 2 issues + v16UnitTypePRs // 3 PRs + v16UnitTypeCommits // 4 Commits + v16UnitTypeReleases // 5 Releases + v16UnitTypeWiki // 6 Wiki + v16UnitTypeSettings // 7 Settings + v16UnitTypeExternalWiki // 8 ExternalWiki + v16UnitTypeExternalTracker // 9 ExternalTracker + ) + + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err } - return x.Sync2(new(User)) + // Updating existing issue units + units := make([]*RepoUnit, 0, 100) + if err := sess.Where("`type` = ?", v16UnitTypePRs).Find(&units); err != nil { + return fmt.Errorf("Query repo units: %w", err) + } + for _, unit := range units { + if unit.Config == nil { + unit.Config = make(map[string]any) + } + // Allow the new merge style if all other merge styles are allowed + allowMergeRebase := true + + if allowMerge, ok := unit.Config["AllowMerge"]; ok { + allowMergeRebase = allowMergeRebase && allowMerge.(bool) + } + + if allowRebase, ok := unit.Config["AllowRebase"]; ok { + allowMergeRebase = allowMergeRebase && allowRebase.(bool) + } + + if allowSquash, ok := unit.Config["AllowSquash"]; ok { + allowMergeRebase = allowMergeRebase && allowSquash.(bool) + } + + if _, ok := unit.Config["AllowRebaseMerge"]; !ok { + unit.Config["AllowRebaseMerge"] = allowMergeRebase + } + if _, err := sess.ID(unit.ID).Cols("config").Update(unit); err != nil { + return err + } + } + return sess.Commit() } diff --git a/models/migrations/v1_8/v78.go b/models/migrations/v1_8/v78.go index 637db451f5b8f..2305984c129ed 100644 --- a/models/migrations/v1_8/v78.go +++ b/models/migrations/v1_8/v78.go @@ -4,40 +4,13 @@ package v1_8 //nolint import ( - "code.gitea.io/gitea/models/migrations/base" - "xorm.io/xorm" ) -func RenameRepoIsBareToIsEmpty(x *xorm.Engine) error { - type Repository struct { - ID int64 `xorm:"pk autoincr"` - IsBare bool - IsEmpty bool `xorm:"INDEX"` - } - - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Sync2(new(Repository)); err != nil { - return err - } - if _, err := sess.Exec("UPDATE repository SET is_empty = is_bare;"); err != nil { - return err - } - if err := sess.Commit(); err != nil { - return err - } - - if err := sess.Begin(); err != nil { - return err - } - if err := base.DropTableColumns(sess, "repository", "is_bare"); err != nil { - return err +func AddUserDefaultTheme(x *xorm.Engine) error { + type User struct { + Theme string `xorm:"VARCHAR(30) NOT NULL DEFAULT ''"` } - return sess.Commit() + return x.Sync2(new(User)) } diff --git a/models/migrations/v1_8/v79.go b/models/migrations/v1_8/v79.go index e708d7229464e..637db451f5b8f 100644 --- a/models/migrations/v1_8/v79.go +++ b/models/migrations/v1_8/v79.go @@ -4,22 +4,40 @@ package v1_8 //nolint import ( - "code.gitea.io/gitea/modules/setting" + 
"code.gitea.io/gitea/models/migrations/base" "xorm.io/xorm" ) -func AddCanCloseIssuesViaCommitInAnyBranch(x *xorm.Engine) error { +func RenameRepoIsBareToIsEmpty(x *xorm.Engine) error { type Repository struct { - ID int64 `xorm:"pk autoincr"` - CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"` + ID int64 `xorm:"pk autoincr"` + IsBare bool + IsEmpty bool `xorm:"INDEX"` } - if err := x.Sync2(new(Repository)); err != nil { + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { return err } - _, err := x.Exec("UPDATE repository SET close_issues_via_commit_in_any_branch = ?", - setting.Repository.DefaultCloseIssuesViaCommitsInAnyBranch) - return err + if err := sess.Sync2(new(Repository)); err != nil { + return err + } + if _, err := sess.Exec("UPDATE repository SET is_empty = is_bare;"); err != nil { + return err + } + if err := sess.Commit(); err != nil { + return err + } + + if err := sess.Begin(); err != nil { + return err + } + if err := base.DropTableColumns(sess, "repository", "is_bare"); err != nil { + return err + } + + return sess.Commit() } diff --git a/models/migrations/v1_8/v80.go b/models/migrations/v1_8/v80.go index 7f2e0ff72bf09..e708d7229464e 100644 --- a/models/migrations/v1_8/v80.go +++ b/models/migrations/v1_8/v80.go @@ -3,14 +3,23 @@ package v1_8 //nolint -import "xorm.io/xorm" +import ( + "code.gitea.io/gitea/modules/setting" -func AddIsLockedToIssues(x *xorm.Engine) error { - // Issue see models/issue.go - type Issue struct { - ID int64 `xorm:"pk autoincr"` - IsLocked bool `xorm:"NOT NULL DEFAULT false"` + "xorm.io/xorm" +) + +func AddCanCloseIssuesViaCommitInAnyBranch(x *xorm.Engine) error { + type Repository struct { + ID int64 `xorm:"pk autoincr"` + CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"` + } + + if err := x.Sync2(new(Repository)); err != nil { + return err } - return x.Sync2(new(Issue)) + _, err := x.Exec("UPDATE repository SET close_issues_via_commit_in_any_branch = ?", + setting.Repository.DefaultCloseIssuesViaCommitsInAnyBranch) + return err } diff --git a/models/migrations/v1_8/v81.go b/models/migrations/v1_8/v81.go index a100dc1ef71f1..7f2e0ff72bf09 100644 --- a/models/migrations/v1_8/v81.go +++ b/models/migrations/v1_8/v81.go @@ -3,28 +3,14 @@ package v1_8 //nolint -import ( - "fmt" +import "xorm.io/xorm" - "xorm.io/xorm" - "xorm.io/xorm/schemas" -) - -func ChangeU2FCounterType(x *xorm.Engine) error { - var err error - - switch x.Dialect().URI().DBType { - case schemas.MYSQL: - _, err = x.Exec("ALTER TABLE `u2f_registration` MODIFY `counter` BIGINT") - case schemas.POSTGRES: - _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` SET DATA TYPE bigint") - case schemas.MSSQL: - _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` BIGINT") - } - - if err != nil { - return fmt.Errorf("Error changing u2f_registration counter column type: %w", err) +func AddIsLockedToIssues(x *xorm.Engine) error { + // Issue see models/issue.go + type Issue struct { + ID int64 `xorm:"pk autoincr"` + IsLocked bool `xorm:"NOT NULL DEFAULT false"` } - return nil + return x.Sync2(new(Issue)) } diff --git a/models/migrations/v1_8/v82.go b/models/migrations/v1_8/v82.go new file mode 100644 index 0000000000000..a100dc1ef71f1 --- /dev/null +++ b/models/migrations/v1_8/v82.go @@ -0,0 +1,30 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_8 //nolint + +import ( + "fmt" + + "xorm.io/xorm" + "xorm.io/xorm/schemas" +) + +func ChangeU2FCounterType(x *xorm.Engine) error { + var err error + + switch x.Dialect().URI().DBType { + case schemas.MYSQL: + _, err = x.Exec("ALTER TABLE `u2f_registration` MODIFY `counter` BIGINT") + case schemas.POSTGRES: + _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` SET DATA TYPE bigint") + case schemas.MSSQL: + _, err = x.Exec("ALTER TABLE `u2f_registration` ALTER COLUMN `counter` BIGINT") + } + + if err != nil { + return fmt.Errorf("Error changing u2f_registration counter column type: %w", err) + } + + return nil +} diff --git a/models/migrations/v1_9/v82.go b/models/migrations/v1_9/v82.go deleted file mode 100644 index 26806dd64505d..0000000000000 --- a/models/migrations/v1_9/v82.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// SPDX-License-Identifier: MIT - -package v1_9 //nolint - -import ( - "fmt" - "path/filepath" - "strings" - - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/setting" - - "xorm.io/xorm" -) - -func FixReleaseSha1OnReleaseTable(x *xorm.Engine) error { - type Release struct { - ID int64 - RepoID int64 - Sha1 string - TagName string - } - - type Repository struct { - ID int64 - OwnerID int64 - Name string - } - - type User struct { - ID int64 - Name string - } - - // UserPath returns the path absolute path of user repositories. - UserPath := func(userName string) string { - return filepath.Join(setting.RepoRootPath, strings.ToLower(userName)) - } - - // RepoPath returns repository path by given user and repository name. - RepoPath := func(userName, repoName string) string { - return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git") - } - - // Update release sha1 - const batchSize = 100 - sess := x.NewSession() - defer sess.Close() - - var ( - err error - count int - gitRepoCache = make(map[int64]*git.Repository) - repoCache = make(map[int64]*Repository) - userCache = make(map[int64]*User) - ) - - if err = sess.Begin(); err != nil { - return err - } - - for start := 0; ; start += batchSize { - releases := make([]*Release, 0, batchSize) - if err = sess.Limit(batchSize, start).Asc("id").Where("is_tag=?", false).Find(&releases); err != nil { - return err - } - if len(releases) == 0 { - break - } - - for _, release := range releases { - gitRepo, ok := gitRepoCache[release.RepoID] - if !ok { - repo, ok := repoCache[release.RepoID] - if !ok { - repo = new(Repository) - has, err := sess.ID(release.RepoID).Get(repo) - if err != nil { - return err - } else if !has { - return fmt.Errorf("Repository %d is not exist", release.RepoID) - } - - repoCache[release.RepoID] = repo - } - - user, ok := userCache[repo.OwnerID] - if !ok { - user = new(User) - has, err := sess.ID(repo.OwnerID).Get(user) - if err != nil { - return err - } else if !has { - return fmt.Errorf("User %d is not exist", repo.OwnerID) - } - - userCache[repo.OwnerID] = user - } - - gitRepo, err = git.OpenRepository(git.DefaultContext, RepoPath(user.Name, repo.Name)) - if err != nil { - return err - } - defer gitRepo.Close() - gitRepoCache[release.RepoID] = gitRepo - } - - release.Sha1, err = gitRepo.GetTagCommitID(release.TagName) - if err != nil && !git.IsErrNotExist(err) { - return err - } - - if err == nil { - if _, err = sess.ID(release.ID).Cols("sha1").Update(release); err != nil { - return err - } - } - - count++ - if count >= 1000 { - if err = sess.Commit(); err != nil { - 
return err - } - if err = sess.Begin(); err != nil { - return err - } - count = 0 - } - } - } - return sess.Commit() -} diff --git a/models/migrations/v1_9/v83.go b/models/migrations/v1_9/v83.go index afe504e9c5f96..26806dd64505d 100644 --- a/models/migrations/v1_9/v83.go +++ b/models/migrations/v1_9/v83.go @@ -4,24 +4,130 @@ package v1_9 //nolint import ( - "code.gitea.io/gitea/modules/timeutil" + "fmt" + "path/filepath" + "strings" + + "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/setting" "xorm.io/xorm" ) -func AddUploaderIDForAttachment(x *xorm.Engine) error { - type Attachment struct { - ID int64 `xorm:"pk autoincr"` - UUID string `xorm:"uuid UNIQUE"` - IssueID int64 `xorm:"INDEX"` - ReleaseID int64 `xorm:"INDEX"` - UploaderID int64 `xorm:"INDEX DEFAULT 0"` - CommentID int64 - Name string - DownloadCount int64 `xorm:"DEFAULT 0"` - Size int64 `xorm:"DEFAULT 0"` - CreatedUnix timeutil.TimeStamp `xorm:"created"` +func FixReleaseSha1OnReleaseTable(x *xorm.Engine) error { + type Release struct { + ID int64 + RepoID int64 + Sha1 string + TagName string + } + + type Repository struct { + ID int64 + OwnerID int64 + Name string + } + + type User struct { + ID int64 + Name string + } + + // UserPath returns the path absolute path of user repositories. + UserPath := func(userName string) string { + return filepath.Join(setting.RepoRootPath, strings.ToLower(userName)) + } + + // RepoPath returns repository path by given user and repository name. + RepoPath := func(userName, repoName string) string { + return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git") + } + + // Update release sha1 + const batchSize = 100 + sess := x.NewSession() + defer sess.Close() + + var ( + err error + count int + gitRepoCache = make(map[int64]*git.Repository) + repoCache = make(map[int64]*Repository) + userCache = make(map[int64]*User) + ) + + if err = sess.Begin(); err != nil { + return err } - return x.Sync2(new(Attachment)) + for start := 0; ; start += batchSize { + releases := make([]*Release, 0, batchSize) + if err = sess.Limit(batchSize, start).Asc("id").Where("is_tag=?", false).Find(&releases); err != nil { + return err + } + if len(releases) == 0 { + break + } + + for _, release := range releases { + gitRepo, ok := gitRepoCache[release.RepoID] + if !ok { + repo, ok := repoCache[release.RepoID] + if !ok { + repo = new(Repository) + has, err := sess.ID(release.RepoID).Get(repo) + if err != nil { + return err + } else if !has { + return fmt.Errorf("Repository %d is not exist", release.RepoID) + } + + repoCache[release.RepoID] = repo + } + + user, ok := userCache[repo.OwnerID] + if !ok { + user = new(User) + has, err := sess.ID(repo.OwnerID).Get(user) + if err != nil { + return err + } else if !has { + return fmt.Errorf("User %d is not exist", repo.OwnerID) + } + + userCache[repo.OwnerID] = user + } + + gitRepo, err = git.OpenRepository(git.DefaultContext, RepoPath(user.Name, repo.Name)) + if err != nil { + return err + } + defer gitRepo.Close() + gitRepoCache[release.RepoID] = gitRepo + } + + release.Sha1, err = gitRepo.GetTagCommitID(release.TagName) + if err != nil && !git.IsErrNotExist(err) { + return err + } + + if err == nil { + if _, err = sess.ID(release.ID).Cols("sha1").Update(release); err != nil { + return err + } + } + + count++ + if count >= 1000 { + if err = sess.Commit(); err != nil { + return err + } + if err = sess.Begin(); err != nil { + return err + } + count = 0 + } + } + } + return sess.Commit() } diff --git a/models/migrations/v1_9/v84.go 
b/models/migrations/v1_9/v84.go index 13fc238d4801c..afe504e9c5f96 100644 --- a/models/migrations/v1_9/v84.go +++ b/models/migrations/v1_9/v84.go @@ -4,14 +4,24 @@ package v1_9 //nolint import ( + "code.gitea.io/gitea/modules/timeutil" + "xorm.io/xorm" ) -func AddGPGKeyImport(x *xorm.Engine) error { - type GPGKeyImport struct { - KeyID string `xorm:"pk CHAR(16) NOT NULL"` - Content string `xorm:"TEXT NOT NULL"` +func AddUploaderIDForAttachment(x *xorm.Engine) error { + type Attachment struct { + ID int64 `xorm:"pk autoincr"` + UUID string `xorm:"uuid UNIQUE"` + IssueID int64 `xorm:"INDEX"` + ReleaseID int64 `xorm:"INDEX"` + UploaderID int64 `xorm:"INDEX DEFAULT 0"` + CommentID int64 + Name string + DownloadCount int64 `xorm:"DEFAULT 0"` + Size int64 `xorm:"DEFAULT 0"` + CreatedUnix timeutil.TimeStamp `xorm:"created"` } - return x.Sync2(new(GPGKeyImport)) + return x.Sync2(new(Attachment)) } diff --git a/models/migrations/v1_9/v85.go b/models/migrations/v1_9/v85.go index 6cf7faaad6392..13fc238d4801c 100644 --- a/models/migrations/v1_9/v85.go +++ b/models/migrations/v1_9/v85.go @@ -4,116 +4,14 @@ package v1_9 //nolint import ( - "fmt" - - "code.gitea.io/gitea/models/migrations/base" - "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/timeutil" - "code.gitea.io/gitea/modules/util" - "xorm.io/xorm" ) -func HashAppToken(x *xorm.Engine) error { - // AccessToken see models/token.go - type AccessToken struct { - ID int64 `xorm:"pk autoincr"` - UID int64 `xorm:"INDEX"` - Name string - Sha1 string - Token string `xorm:"-"` - TokenHash string // sha256 of token - we will ensure UNIQUE later - TokenSalt string - TokenLastEight string `xorm:"token_last_eight"` - - CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` - UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` - HasRecentActivity bool `xorm:"-"` - HasUsed bool `xorm:"-"` - } - - // First remove the index - sess := x.NewSession() - defer sess.Close() - - if err := sess.Begin(); err != nil { - return err - } - - if err := sess.Sync2(new(AccessToken)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - - if err := sess.Commit(); err != nil { - return err +func AddGPGKeyImport(x *xorm.Engine) error { + type GPGKeyImport struct { + KeyID string `xorm:"pk CHAR(16) NOT NULL"` + Content string `xorm:"TEXT NOT NULL"` } - if err := sess.Begin(); err != nil { - return err - } - - // transform all tokens to hashes - const batchSize = 100 - for start := 0; ; start += batchSize { - tokens := make([]*AccessToken, 0, batchSize) - if err := sess.Limit(batchSize, start).Find(&tokens); err != nil { - return err - } - if len(tokens) == 0 { - break - } - - for _, token := range tokens { - // generate salt - salt, err := util.CryptoRandomString(10) - if err != nil { - return err - } - token.TokenSalt = salt - token.TokenHash = base.HashToken(token.Sha1, salt) - if len(token.Sha1) < 8 { - log.Warn("Unable to transform token %s with name %s belonging to user ID %d, skipping transformation", token.Sha1, token.Name, token.UID) - continue - } - token.TokenLastEight = token.Sha1[len(token.Sha1)-8:] - token.Sha1 = "" // ensure to blank out column in case drop column doesn't work - - if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil { - return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err) - } - - } - } - - // Commit and begin new transaction for dropping columns - if err := sess.Commit(); err != nil { - return err - } - if err := sess.Begin(); err 
!= nil { - return err - } - - if err := base.DropTableColumns(sess, "access_token", "sha1"); err != nil { - return err - } - if err := sess.Commit(); err != nil { - return err - } - return resyncHashAppTokenWithUniqueHash(x) -} - -func resyncHashAppTokenWithUniqueHash(x *xorm.Engine) error { - // AccessToken see models/token.go - type AccessToken struct { - TokenHash string `xorm:"UNIQUE"` // sha256 of token - we will ensure UNIQUE later - } - sess := x.NewSession() - defer sess.Close() - if err := sess.Begin(); err != nil { - return err - } - if err := sess.Sync2(new(AccessToken)); err != nil { - return fmt.Errorf("Sync2: %w", err) - } - return sess.Commit() + return x.Sync2(new(GPGKeyImport)) } diff --git a/models/migrations/v1_9/v86.go b/models/migrations/v1_9/v86.go index a2a2c042c0bac..6cf7faaad6392 100644 --- a/models/migrations/v1_9/v86.go +++ b/models/migrations/v1_9/v86.go @@ -4,13 +4,116 @@ package v1_9 //nolint import ( + "fmt" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/modules/util" + "xorm.io/xorm" ) -func AddHTTPMethodToWebhook(x *xorm.Engine) error { - type Webhook struct { - HTTPMethod string `xorm:"http_method DEFAULT 'POST'"` +func HashAppToken(x *xorm.Engine) error { + // AccessToken see models/token.go + type AccessToken struct { + ID int64 `xorm:"pk autoincr"` + UID int64 `xorm:"INDEX"` + Name string + Sha1 string + Token string `xorm:"-"` + TokenHash string // sha256 of token - we will ensure UNIQUE later + TokenSalt string + TokenLastEight string `xorm:"token_last_eight"` + + CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` + UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"` + HasRecentActivity bool `xorm:"-"` + HasUsed bool `xorm:"-"` + } + + // First remove the index + sess := x.NewSession() + defer sess.Close() + + if err := sess.Begin(); err != nil { + return err + } + + if err := sess.Sync2(new(AccessToken)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + + if err := sess.Commit(); err != nil { + return err } - return x.Sync2(new(Webhook)) + if err := sess.Begin(); err != nil { + return err + } + + // transform all tokens to hashes + const batchSize = 100 + for start := 0; ; start += batchSize { + tokens := make([]*AccessToken, 0, batchSize) + if err := sess.Limit(batchSize, start).Find(&tokens); err != nil { + return err + } + if len(tokens) == 0 { + break + } + + for _, token := range tokens { + // generate salt + salt, err := util.CryptoRandomString(10) + if err != nil { + return err + } + token.TokenSalt = salt + token.TokenHash = base.HashToken(token.Sha1, salt) + if len(token.Sha1) < 8 { + log.Warn("Unable to transform token %s with name %s belonging to user ID %d, skipping transformation", token.Sha1, token.Name, token.UID) + continue + } + token.TokenLastEight = token.Sha1[len(token.Sha1)-8:] + token.Sha1 = "" // ensure to blank out column in case drop column doesn't work + + if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil { + return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err) + } + + } + } + + // Commit and begin new transaction for dropping columns + if err := sess.Commit(); err != nil { + return err + } + if err := sess.Begin(); err != nil { + return err + } + + if err := base.DropTableColumns(sess, "access_token", "sha1"); err != nil { + return err + } + if err := sess.Commit(); err != nil { + return err + } + return 
resyncHashAppTokenWithUniqueHash(x) +} + +func resyncHashAppTokenWithUniqueHash(x *xorm.Engine) error { + // AccessToken see models/token.go + type AccessToken struct { + TokenHash string `xorm:"UNIQUE"` // sha256 of token - we will ensure UNIQUE later + } + sess := x.NewSession() + defer sess.Close() + if err := sess.Begin(); err != nil { + return err + } + if err := sess.Sync2(new(AccessToken)); err != nil { + return fmt.Errorf("Sync2: %w", err) + } + return sess.Commit() } diff --git a/models/migrations/v1_9/v87.go b/models/migrations/v1_9/v87.go index b373a40bc07fb..a2a2c042c0bac 100644 --- a/models/migrations/v1_9/v87.go +++ b/models/migrations/v1_9/v87.go @@ -7,11 +7,10 @@ import ( "xorm.io/xorm" ) -func AddAvatarFieldToRepository(x *xorm.Engine) error { - type Repository struct { - // ID(10-20)-md5(32) - must fit into 64 symbols - Avatar string `xorm:"VARCHAR(64)"` +func AddHTTPMethodToWebhook(x *xorm.Engine) error { + type Webhook struct { + HTTPMethod string `xorm:"http_method DEFAULT 'POST'"` } - return x.Sync2(new(Repository)) + return x.Sync2(new(Webhook)) } diff --git a/models/migrations/v1_9/v88.go b/models/migrations/v1_9/v88.go new file mode 100644 index 0000000000000..b373a40bc07fb --- /dev/null +++ b/models/migrations/v1_9/v88.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_9 //nolint + +import ( + "xorm.io/xorm" +) + +func AddAvatarFieldToRepository(x *xorm.Engine) error { + type Repository struct { + // ID(10-20)-md5(32) - must fit into 64 symbols + Avatar string `xorm:"VARCHAR(64)"` + } + + return x.Sync2(new(Repository)) +}
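
Reviewer note (not part of the patch): the bitmap constants added for the scoped-token migration encode "write implies read" for every scope category. A minimal, self-contained sketch of that rule, reduced here to a single hypothetical read/write pair, could look like this:

package main

import "fmt"

type bitmap uint64

// Illustrative bits only; the migration defines a pair like this per category.
const (
	readRepoBits  bitmap = 1 << iota              // e.g. "read:repository"
	writeRepoBits bitmap = 1<<iota | readRepoBits // "write:repository" carries the read bit
)

func main() {
	granted := writeRepoBits // token stored with only the write scope

	// The read check still passes because the write bit includes it,
	// mirroring what hasScope() does in the migration's bitmap type.
	fmt.Println(granted&readRepoBits == readRepoBits) // true
}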
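
Reviewer note (not part of the patch): ConvertScopedAccessTokens rewrites each stored token scope by translating recognized old scopes through accessTokenScopeMap, preserving anything it does not recognize, and then normalizing the result. The standalone sketch below approximates that flow with a trimmed-down mapping table (three entries chosen purely for illustration); in the real migration, ordering and de-duplication go through NormalizePreservingUnknown rather than the simple set used here:

package main

import (
	"fmt"
	"strings"
)

// Illustrative subset of the old-scope -> new-scope mapping.
var oldToNew = map[string][]string{
	"repo":         {"write:repository"},
	"repo:status":  {"write:repository"},
	"notification": {"write:notification"},
}

// convert sketches the per-token rewrite: map known old scopes,
// keep unknown ones verbatim, and drop duplicates.
func convert(oldScope string) string {
	var out []string
	seen := map[string]bool{}
	for _, s := range strings.Split(oldScope, ",") {
		mapped, known := oldToNew[s]
		if !known {
			mapped = []string{s} // preserve unrecognized scopes
		}
		for _, ns := range mapped {
			if !seen[ns] {
				seen[ns] = true
				out = append(out, ns)
			}
		}
	}
	return strings.Join(out, ",")
}

func main() {
	// "repo" and "repo:status" collapse into a single write:repository;
	// the unrecognized "custom:thing" is kept as-is.
	fmt.Println(convert("repo,repo:status,notification,custom:thing"))
	// write:repository,write:notification,custom:thing
}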