Migrate old logs to new database schema (#1828)
Addresses the new models introduced in #1802.
parent 609ba481b5
commit 5139624cf8
Makefile (4 changed lines)

@@ -144,11 +144,11 @@ test-cli: ## Test cli code
 	go test -race -cover -coverprofile cli-coverage.out -timeout 30s github.com/woodpecker-ci/woodpecker/cmd/cli github.com/woodpecker-ci/woodpecker/cli/...

 test-server-datastore: ## Test server datastore
-	go test -timeout 30s -run TestMigrate github.com/woodpecker-ci/woodpecker/server/store/...
+	go test -timeout 60s -run TestMigrate github.com/woodpecker-ci/woodpecker/server/store/...
 	go test -race -timeout 30s -skip TestMigrate github.com/woodpecker-ci/woodpecker/server/store/...

 test-server-datastore-coverage: ## Test server datastore with coverage report
-	go test -race -cover -coverprofile datastore-coverage.out -timeout 30s github.com/woodpecker-ci/woodpecker/server/store/...
+	go test -race -cover -coverprofile datastore-coverage.out -timeout 60s github.com/woodpecker-ci/woodpecker/server/store/...

 test-ui: ui-dependencies ## Test UI code
 	(cd web/; pnpm run lint)
@@ -241,6 +241,11 @@ var flags = []cli.Flag{
 		Usage:  "status context format",
 		Value:  "{{ .context }}/{{ .event }}/{{ .pipeline }}",
 	},
+	&cli.BoolFlag{
+		EnvVars: []string{"WOODPECKER_MIGRATIONS_ALLOW_LONG"},
+		Name:    "migrations-allow-long",
+		Value:   false,
+	},
 	//
 	// resource limit parameters
 	//
@@ -356,6 +356,7 @@ func setupEvilGlobals(c *cli.Context, v store.Store, f forge.Forge) {
 	server.Config.Pipeline.Networks = c.StringSlice("network")
 	server.Config.Pipeline.Volumes = c.StringSlice("volume")
 	server.Config.Pipeline.Privileged = c.StringSlice("escalate")
+	server.Config.Server.Migrations.AllowLong = c.Bool("migrations-allow-long")

 	// prometheus
 	server.Config.Prometheus.AuthToken = c.String("prometheus-auth-token")
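The two hunks above add the `migrations-allow-long` flag and copy it into the global server config. Below is a minimal, self-contained sketch of that flow, assuming urfave/cli v2 semantics; the package layout and the `allowLongMigrations` variable are illustrative stand-ins, not the actual Woodpecker wiring.

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// stands in for server.Config.Server.Migrations.AllowLong
var allowLongMigrations bool

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:    "migrations-allow-long",
				EnvVars: []string{"WOODPECKER_MIGRATIONS_ALLOW_LONG"},
				Value:   false,
			},
		},
		Action: func(c *cli.Context) error {
			// copy the resolved flag value into the global config,
			// mirroring what setupEvilGlobals does above
			allowLongMigrations = c.Bool("migrations-allow-long")
			fmt.Println("allow long migrations:", allowLongMigrations)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```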
@@ -23,8 +23,8 @@ Below are resources requirements for Woodpecker components itself:

 | Component | Memory | CPU |
 | --------- | ------ | --- |
-| Server | 32 MB | 1 |
-| Agent | 32 MB | 1 |
+| Server | 200 MB | 1 |
+| Agent | 32 MB | 1 |

 Note, that those values do not include the operating system or workload (pipelines execution) resources consumption.
@@ -19,6 +19,7 @@ Some versions need some changes to the server configuration or the pipeline conf
 - Dropped support for [Coding](https://coding.net/) and [Gogs](https://gogs.io).
 - `/api/queue/resume` & `/api/queue/pause` endpoint methods were changed from `GET` to `POST`
 - rename `pipeline:` key in your workflow config to `steps:`
+- If you want to migrate old logs to the new format, watch the error messages on startup. If there are none, you are good to go; otherwise plan for a migration that can take hours. Set `WOODPECKER_MIGRATIONS_ALLOW_LONG` to `true` and let it run.

 ## 0.15.0
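For readers of that note, here is a simplified restatement in Go of the gate the server applies at startup: if more old log rows than the default threshold (5000 in the migration below) would need converting and long migrations are not explicitly allowed, the migration is skipped with an error. The function name and the exact message are illustrative, not the real API.

```go
package main

import "fmt"

// assumed default threshold, matching maxDefaultSqliteItems019 below
const maxAutoMigrateRows = 5000

func checkLogMigration(toMigrate int64, allowLong bool) error {
	if toMigrate > maxAutoMigrateRows && !allowLong {
		return fmt.Errorf("skipping log migration: %d entries to convert; set WOODPECKER_MIGRATIONS_ALLOW_LONG=true to run it anyway", toMigrate)
	}
	return nil
}

func main() {
	// with the default settings a large backlog only produces a warning
	if err := checkLogMigration(12000, false); err != nil {
		fmt.Println("startup warning:", err)
	}
	// once the operator opts in, the long migration is allowed to run
	if err := checkLogMigration(12000, true); err == nil {
		fmt.Println("long migration allowed, proceeding")
	}
}
```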
@@ -67,6 +67,9 @@ var Config = struct {
 		StatusContextFormat string
 		SessionExpires      time.Duration
 		RootURL             string
+		Migrations          struct {
+			AllowLong bool
+		}
 		// Open bool
 		// Orgs map[string]struct{}
 		// Admins map[string]struct{}
server/store/datastore/migration/019_alter_logs_table.go (new file, 175 lines)

@@ -0,0 +1,175 @@
// Copyright 2023 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package migration

import (
	"context"
	"encoding/json"
	"fmt"
	"runtime"

	"github.com/rs/zerolog/log"
	"github.com/tevino/abool"
	"xorm.io/xorm"

	"github.com/woodpecker-ci/woodpecker/server"
	"github.com/woodpecker-ci/woodpecker/shared/utils"
)

// maxDefaultSqliteItems019 sets the threshold at which the migration will fail by default
var maxDefaultSqliteItems019 = 5000

// perPage019 sets the size of the slice to read per page
var perPage019 = 100

type oldLogs019 struct {
	ID     int64  `xorm:"pk autoincr 'log_id'"`
	StepID int64  `xorm:"UNIQUE 'log_step_id'"`
	Data   []byte `xorm:"LONGBLOB 'log_data'"`
}

func (oldLogs019) TableName() string {
	return "logs"
}

type oldLogEntry019 struct {
	Step string `json:"step,omitempty"`
	Time int64  `json:"time,omitempty"`
	Type int    `json:"type,omitempty"`
	Pos  int    `json:"pos,omitempty"`
	Out  string `json:"out,omitempty"`
}

type newLogEntry019 struct {
	ID      int64 `xorm:"pk autoincr 'id'"`
	StepID  int64 `xorm:"'step_id'"`
	Time    int64
	Line    int
	Data    []byte `xorm:"LONGBLOB"`
	Created int64  `xorm:"created"`
	Type    int
}

func (newLogEntry019) TableName() string {
	return "log_entries"
}

var initLogsEntriesTable = task{
	name:     "init-log_entries",
	required: true,
	fn: func(sess *xorm.Session) error {
		return sess.Sync(new(newLogEntry019))
	},
}

var migrateLogs2LogEntries = task{
	name:     "migrate-logs-to-log_entries",
	required: false,
	engineFn: func(e *xorm.Engine) error {
		// make sure the old logs table exists
		if exist, err := e.IsTableExist(new(oldLogs019)); !exist || err != nil {
			return err
		}

		// first check how many entries have to be migrated
		toMigrate, err := e.Count(new(oldLogs019))
		if err != nil {
			return err
		}

		if toMigrate > int64(maxDefaultSqliteItems019) && !server.Config.Server.Migrations.AllowLong {
			return fmt.Errorf("Migrating logs to log_entries is skipped, as we have %d entries to convert. Set 'WOODPECKER_MIGRATIONS_ALLOW_LONG' to 'true' to migrate anyway", toMigrate)
		}

		if err := e.Sync(new(oldLogs019)); err != nil {
			return err
		}

		page := 0
		logs := make([]*oldLogs019, 0, perPage019)
		logEntries := make([]*oldLogEntry019, 0, 50)
		sigterm := abool.New()
		ctx, cancelCtx := context.WithCancelCause(context.Background())
		defer cancelCtx(nil)
		_ = utils.WithContextSigtermCallback(ctx, func() {
			log.Info().Msg("ctrl+c received, stopping current migration")
			sigterm.Set()
		})

		for {
			if sigterm.IsSet() {
				return fmt.Errorf("migration 'migrate-logs-to-log_entries' gracefully aborted")
			}

			sess := e.NewSession().NoCache()
			defer sess.Close()
			if err := sess.Begin(); err != nil {
				return err
			}
			logs = logs[:0]

			err := sess.Limit(perPage019).Find(&logs)
			if err != nil {
				return err
			}

			log.Trace().Msgf("migrate-logs-to-log_entries: process page %d", page)

			for _, l := range logs {
				logEntries = logEntries[:0]
				if err := json.Unmarshal(l.Data, &logEntries); err != nil {
					return err
				}

				time := int64(0)
				for _, logEntry := range logEntries {
					if logEntry.Time > time {
						time = logEntry.Time
					}

					log := &newLogEntry019{
						StepID: l.StepID,
						Data:   []byte(logEntry.Out),
						Line:   logEntry.Pos,
						Time:   time,
						Type:   logEntry.Type,
					}

					if _, err := sess.Insert(log); err != nil {
						return err
					}
				}

				if _, err := sess.Delete(l); err != nil {
					return err
				}
			}

			if err := sess.Commit(); err != nil {
				return err
			}

			if len(logs) < perPage019 {
				break
			}

			runtime.GC()
			page++
		}

		return e.DropTables("logs")
	},
}
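To make the data transformation in this file concrete: the old `logs` table stores one JSON blob per step, and the migration fans that blob out into one `log_entries` row per log line. Here is an illustrative, self-contained sketch of that conversion; the structs are trimmed-down copies of the ones above and the sample blob and step ID are made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// simplified copy of oldLogEntry019 (one element of the old JSON blob)
type oldLogEntry struct {
	Step string `json:"step,omitempty"`
	Time int64  `json:"time,omitempty"`
	Type int    `json:"type,omitempty"`
	Pos  int    `json:"pos,omitempty"`
	Out  string `json:"out,omitempty"`
}

// simplified copy of newLogEntry019 (one row in log_entries)
type newLogEntry struct {
	StepID int64
	Time   int64
	Line   int
	Data   []byte
	Type   int
}

func main() {
	// example of what a single old log_data blob might look like
	blob := []byte(`[
		{"step":"build","time":0,"pos":0,"out":"go build started"},
		{"step":"build","time":2,"pos":1,"out":"go build done"}
	]`)

	var oldEntries []oldLogEntry
	if err := json.Unmarshal(blob, &oldEntries); err != nil {
		panic(err)
	}

	// convert each JSON entry into its own row, carrying forward the
	// highest timestamp seen so far, just as the migration does
	stepID, maxTime := int64(42), int64(0)
	for _, e := range oldEntries {
		if e.Time > maxTime {
			maxTime = e.Time
		}
		row := newLogEntry{
			StepID: stepID,
			Data:   []byte(e.Out),
			Line:   e.Pos,
			Time:   maxTime,
			Type:   e.Type,
		}
		fmt.Printf("%+v\n", row)
	}
}
```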
@@ -50,6 +50,8 @@ var migrationTasks = []*task{
 	&dropFiles,
 	&removeMachineCol,
 	&dropOldCols,
+	&initLogsEntriesTable,
+	&migrateLogs2LogEntries,
 }

 var allBeans = []interface{}{
@@ -78,6 +80,8 @@ type task struct {
 	name     string
 	required bool
 	fn       func(sess *xorm.Session) error
+	// engineFn manages the session on its own; only use it if you really need to
+	engineFn func(e *xorm.Engine) error
 }

 // initNew create tables for new instance
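A rough sketch of the difference between the two task styles added here: `fn` runs inside a session that the runner opens, commits, and rolls back for you, while `engineFn` receives the whole engine and must manage its own sessions, which is what the paged log migration needs. The task names and SQL below are illustrative stand-ins, not real migrations.

```go
package main

import "xorm.io/xorm"

type task struct {
	name     string
	required bool
	fn       func(sess *xorm.Session) error // transaction handled by the runner
	engineFn func(e *xorm.Engine) error     // manages its own sessions/transactions
}

// a small schema change fits the fn style: one statement, one transaction
var addColumnTask = task{
	name:     "add-some-column",
	required: true,
	fn: func(sess *xorm.Session) error {
		_, err := sess.Exec("ALTER TABLE example ADD COLUMN note TEXT")
		return err
	},
}

// long, paged work fits the engineFn style: commit per batch instead of once
var pagedRewriteTask = task{
	name:     "paged-rewrite",
	required: false,
	engineFn: func(e *xorm.Engine) error {
		sess := e.NewSession()
		defer sess.Close()
		if err := sess.Begin(); err != nil {
			return err
		}
		// ... do one batch of work here, then commit and start the next batch ...
		return sess.Commit()
	},
}

func main() { _, _ = addColumnTask, pagedRewriteTask }
```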
@@ -153,36 +157,44 @@ func runTasks(e *xorm.Engine, tasks []*task) error {
 		}

 		log.Trace().Msgf("start migration task '%s'", task.name)
-		sess := e.NewSession().NoCache()
-		defer sess.Close()
-		if err := sess.Begin(); err != nil {
-			return err
-		}
-
+		aliveMsgCancel := showBeAliveSign(task.name)
+		defer aliveMsgCancel(nil)
+		var taskErr error
 		if task.fn != nil {
-			aliveMsgCancel := showBeAliveSign(task.name)
-			if err := task.fn(sess); err != nil {
-				aliveMsgCancel(nil)
+			sess := e.NewSession().NoCache()
+			defer sess.Close()
+			if err := sess.Begin(); err != nil {
+				return err
+			}
+
+			if taskErr = task.fn(sess); taskErr != nil {
 				if err2 := sess.Rollback(); err2 != nil {
-					err = errors.Join(err, err2)
+					taskErr = errors.Join(taskErr, err2)
 				}
-
-				if task.required {
-					return err
-				}
-				log.Error().Err(err).Msgf("migration task '%s' failed but is not required", task.name)
-				continue
 			}
-			aliveMsgCancel(nil)
-			log.Debug().Msgf("migration task '%s' done", task.name)
+
+			if err := sess.Commit(); err != nil {
+				return err
+			}
+		} else if task.engineFn != nil {
+			taskErr = task.engineFn(e)
 		} else {
 			log.Trace().Msgf("skip migration task '%s'", task.name)
+			aliveMsgCancel(nil)
 			continue
 		}

-		if _, err := sess.Insert(&migrations{task.name}); err != nil {
-			return err
-		}
-
-		if err := sess.Commit(); err != nil {
+		aliveMsgCancel(nil)
+		if taskErr != nil {
+			if task.required {
+				return taskErr
+			}
+			log.Error().Err(taskErr).Msgf("migration task '%s' failed but is not required", task.name)
+			continue
+		}
+		log.Debug().Msgf("migration task '%s' done", task.name)

+		if _, err := e.Insert(&migrations{task.name}); err != nil {
 			return err
 		}
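The hunk above also changes the error policy: a failing required task aborts the whole run, while a failing optional task (such as the log migration) only logs and leaves no `migrations` record, so it is retried on the next start. A minimal sketch of that control flow with a hypothetical runner, not the real runTasks:

```go
package main

import (
	"errors"
	"fmt"
)

type simpleTask struct {
	name     string
	required bool
	run      func() error
}

func runAll(tasks []simpleTask) error {
	for _, t := range tasks {
		if err := t.run(); err != nil {
			if t.required {
				// a required task aborts all remaining migrations
				return fmt.Errorf("required task %q failed: %w", t.name, err)
			}
			// an optional task is skipped and will be retried next start,
			// because no completion record is written for it
			fmt.Printf("optional task %q failed, continuing: %v\n", t.name, err)
			continue
		}
		fmt.Printf("task %q done and recorded\n", t.name)
	}
	return nil
}

func main() {
	tasks := []simpleTask{
		{name: "init-log_entries", required: true, run: func() error { return nil }},
		{name: "migrate-logs-to-log_entries", required: false, run: func() error { return errors.New("too many rows") }},
	}
	if err := runAll(tasks); err != nil {
		fmt.Println("migrations aborted:", err)
	}
}
```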