fix: Fix database backup and restore issues (#85)

* fix: Fix database backup and restore issues
Author: kanzihuang
Date: 2024-01-15 20:11:28 +08:00
Committed by: GitHub
Parent: b873855b44
Commit: c0232c4c75
12 changed files with 53 additions and 32 deletions

@@ -5,9 +5,9 @@ WORKDIR /mayfly
 COPY mayfly_go_web .
-RUN yarn install
-RUN yarn build
+RUN yarn config set registry 'https://registry.npm.taobao.org' && \
+    yarn install && \
+    yarn build

 # 构建后端资源
 FROM golang:1.21.5 as be-builder

@@ -19,10 +19,6 @@ var (
 	dataSyncApp DataSyncTask
 )
-//var repositories *repository.Repositories
-//var scheduler *dbScheduler[*entity.DbBackup]
-//var scheduler1 *dbScheduler[*entity.DbRestore]
 func Init() {
 	sync.OnceFunc(func() {
 		repositories := &repository.Repositories{

@@ -65,10 +65,13 @@ func (app *DbBinlogApp) fetchBinlog(ctx context.Context, backup *entity.DbBackup
 	if ok {
 		latestBinlogSequence = binlogHistory.Sequence
 	} else {
-		backupHistory, err := app.backupHistoryRepo.GetEarliestHistory(backup.DbInstanceId)
+		backupHistory, ok, err := app.backupHistoryRepo.GetEarliestHistory(backup.DbInstanceId)
 		if err != nil {
 			return err
 		}
+		if !ok {
+			return nil
+		}
 		earliestBackupSequence = backupHistory.BinlogSequence
 	}
 	conn, err := app.dbApp.GetDbConnByInstanceId(backup.DbInstanceId)

@@ -92,6 +95,8 @@ func (app *DbBinlogApp) run() {
 func (app *DbBinlogApp) run() {
 	defer app.waitGroup.Done()
+	// todo: 实现 binlog 并发下载
+	timex.SleepWithContext(app.context, time.Minute)
 	for !app.closed() {
 		app.fetchFromAllInstances()
 		timex.SleepWithContext(app.context, binlogDownloadInterval)
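
A note on the run() change above: the loop now waits a minute before the first fetch and sleeps with a context-aware helper, so closing the app can interrupt it at any point. A minimal sketch of that loop shape, assuming a hypothetical fetch callback (timex.SleepWithContext is the helper rewritten at the end of this diff):

package example

import (
	"context"
	"time"

	"mayfly-go/pkg/utils/timex"
)

// poll delays the first run, then repeats fetch until ctx is cancelled.
// SleepWithContext returns early on cancellation, so shutdown never waits
// on a plain time.Sleep.
func poll(ctx context.Context, interval time.Duration, fetch func()) {
	timex.SleepWithContext(ctx, time.Minute)
	for ctx.Err() == nil {
		fetch()
		timex.SleepWithContext(ctx, interval)
	}
}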

@@ -232,10 +232,13 @@ func (s *dbScheduler) restoreMysql(ctx context.Context, job entity.DbJob) error
 	if ok {
 		latestBinlogSequence = binlogHistory.Sequence
 	} else {
-		backupHistory, err := s.backupHistoryRepo.GetEarliestHistory(restore.DbInstanceId)
+		backupHistory, ok, err := s.backupHistoryRepo.GetEarliestHistory(restore.DbInstanceId)
 		if err != nil {
 			return err
 		}
+		if !ok {
+			return nil
+		}
 		earliestBackupSequence = backupHistory.BinlogSequence
 	}
 	binlogFiles, err := dbProgram.FetchBinlogs(ctx, true, earliestBackupSequence, latestBinlogSequence)

@@ -300,12 +303,12 @@ func (s *dbScheduler) runnable(job entity.DbJob, next runner.NextFunc) bool
 		itemBase := item.(entity.DbJob).GetJobBase()
 		if jobBase.DbInstanceId == itemBase.DbInstanceId {
 			countByInstanceId++
-			if countByInstanceId > maxCountByInstanceId {
+			if countByInstanceId >= maxCountByInstanceId {
 				return false
 			}
 			if jobBase.DbName == itemBase.DbName {
 				countByDbName++
-				if countByDbName > maxCountByDbName {
+				if countByDbName >= maxCountByDbName {
 					return false
 				}
 			}
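
The two comparison changes above (">" to ">=") tighten the per-instance and per-database limits: a job is now rejected once the counted jobs reach the configured maximum instead of only after exceeding it. A minimal sketch of this counting pattern, using hypothetical field names and limits rather than the project's real entity types:

package example

// jobInfo is a hypothetical stand-in for the fields read off entity.DbJob.
type jobInfo struct {
	instanceID uint64
	dbName     string
}

// runnable reports whether candidate may start given the jobs already counted.
// Reaching the limit (>=) blocks the candidate; ">" let one extra job through.
func runnable(running []jobInfo, candidate jobInfo, maxPerInstance, maxPerDB int) bool {
	countByInstance, countByDB := 0, 0
	for _, j := range running {
		if j.instanceID != candidate.instanceID {
			continue
		}
		countByInstance++
		if countByInstance >= maxPerInstance {
			return false
		}
		if j.dbName == candidate.dbName {
			countByDB++
			if countByDB >= maxPerDB {
				return false
			}
		}
	}
	return true
}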

@@ -187,7 +187,7 @@ func (d *DbJobBaseImpl) IsFinished() bool {
 	return !d.Repeated && d.LastStatus == DbJobSuccess
 }

-func (d *DbJobBaseImpl) Renew(job runner.Job) {
+func (d *DbJobBaseImpl) Update(job runner.Job) {
 	jobBase := job.(DbJob).GetJobBase()
 	d.StartTime = jobBase.StartTime
 	d.Interval = jobBase.Interval

@@ -14,5 +14,5 @@ type DbBackupHistory interface {
 	GetLatestHistory(instanceId uint64, dbName string, bi *entity.BinlogInfo) (*entity.DbBackupHistory, error)
-	GetEarliestHistory(instanceId uint64) (*entity.DbBackupHistory, error)
+	GetEarliestHistory(instanceId uint64) (*entity.DbBackupHistory, bool, error)
 }

@@ -1,6 +1,8 @@
 package persistence

 import (
+	"errors"
+	"gorm.io/gorm"
 	"mayfly-go/internal/db/domain/entity"
 	"mayfly-go/internal/db/domain/repository"
 	"mayfly-go/pkg/base"

@@ -47,15 +49,19 @@ func (repo *dbBackupHistoryRepoImpl) GetLatestHistory(instanceId uint64, dbName
 	return history, err
 }

-func (repo *dbBackupHistoryRepoImpl) GetEarliestHistory(instanceId uint64) (*entity.DbBackupHistory, error) {
+func (repo *dbBackupHistoryRepoImpl) GetEarliestHistory(instanceId uint64) (*entity.DbBackupHistory, bool, error) {
 	history := &entity.DbBackupHistory{}
 	db := global.Db.Model(repo.GetModel())
 	err := db.Where("db_instance_id = ?", instanceId).
 		Scopes(gormx.UndeleteScope).
 		Order("binlog_sequence").
 		First(history).Error
-	if err != nil {
-		return nil, err
+	switch {
+	case err == nil:
+		return history, true, nil
+	case errors.Is(err, gorm.ErrRecordNotFound):
+		return history, false, nil
+	default:
+		return nil, false, err
 	}
-	return history, nil
 }
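
This repository change is the heart of the fix: when an instance has no backup history yet, gorm's First returns gorm.ErrRecordNotFound, and treating that as a hard error made binlog fetching and restores fail before any backup had been taken. Returning an explicit found flag lets callers treat the empty case as "nothing to do yet". A minimal sketch of the pattern, with a hypothetical model standing in for entity.DbBackupHistory:

package example

import (
	"errors"

	"gorm.io/gorm"
)

// BackupHistory is a hypothetical model standing in for entity.DbBackupHistory.
type BackupHistory struct {
	ID             uint64
	DbInstanceId   uint64
	BinlogSequence int64
}

// getEarliestHistory returns (history, found, error): a missing row is not an
// error, it simply means no backup exists for the instance yet.
func getEarliestHistory(db *gorm.DB, instanceId uint64) (*BackupHistory, bool, error) {
	history := &BackupHistory{}
	err := db.Where("db_instance_id = ?", instanceId).
		Order("binlog_sequence").
		First(history).Error
	switch {
	case err == nil:
		return history, true, nil
	case errors.Is(err, gorm.ErrRecordNotFound):
		return nil, false, nil
	default:
		return nil, false, err
	}
}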

@@ -94,7 +94,7 @@ func (repo *dbBinlogHistoryRepoImpl) InsertWithBinlogFiles(ctx context.Context,
 	if len(binlogFiles) == 0 {
 		return nil
 	}
-	histories := make([]any, 0, len(binlogFiles))
+	histories := make([]*entity.DbBinlogHistory, 0, len(binlogFiles))
 	for _, fileOnServer := range binlogFiles {
 		if !fileOnServer.Downloaded {
 			break

@@ -115,7 +115,7 @@
 		}
 	}
 	if len(histories) > 0 {
-		if err := repo.Upsert(ctx, histories[len(histories)-1].(*entity.DbBinlogHistory)); err != nil {
+		if err := repo.Upsert(ctx, histories[len(histories)-1]); err != nil {
 			return err
 		}
 	}
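
Switching the slice from []any to []*entity.DbBinlogHistory lets the Upsert call above drop its type assertion, which would panic if a value of another type ever slipped into the slice. A small illustration with a hypothetical history type:

package example

type history struct{ Sequence int64 }

// lastTyped relies on the compiler: the element type is guaranteed.
func lastTyped(histories []*history) *history {
	return histories[len(histories)-1]
}

// lastAny needs a runtime assertion, which can panic or silently return nil.
func lastAny(histories []any) *history {
	h, _ := histories[len(histories)-1].(*history)
	return h
}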

@@ -114,13 +114,16 @@ func (s *DelayQueue[T]) Dequeue(ctx context.Context) (T, bool) {
 		// 等待时间到期或新元素加入
 		timer := time.NewTimer(delay)
 		select {
-		case elm := <-s.transferChan:
-			return elm, true
-		case <-s.enqueuedSignal:
-			continue
 		case <-timer.C:
 			continue
+		case elm := <-s.transferChan:
+			timer.Stop()
+			return elm, true
+		case <-s.enqueuedSignal:
+			timer.Stop()
+			continue
 		case <-ctx.Done():
+			timer.Stop()
 			return s.zero, false
 		}
 	}

@@ -187,12 +190,14 @@ func (s *DelayQueue[T]) Enqueue(ctx context.Context, val T) bool {
 			// 新元素需要延迟,等待退出信号、出队信号和到期信号
 			timer := time.NewTimer(delay)
 			select {
-			case <-s.dequeuedSignal:
-				// 收到出队信号,从头开始尝试入队
-				continue
 			case <-timer.C:
 				// 新元素不再需要延迟
+			case <-s.dequeuedSignal:
+				// 收到出队信号,从头开始尝试入队
+				timer.Stop()
+				continue
 			case <-ctx.Done():
+				timer.Stop()
 				return false
 			}
 		} else {
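
In the DelayQueue changes above, every select branch that does not receive from timer.C now calls timer.Stop() before returning or looping, so each iteration releases its timer right away instead of leaving it running until it fires (Go's select has no case priority, so the reordering itself is cosmetic). A minimal sketch of the wait pattern with hypothetical channels:

package example

import (
	"context"
	"time"
)

// waitResult reports which branch of the select fired.
type waitResult int

const (
	timedOut waitResult = iota
	gotItem
	cancelled
)

// waitForWork waits for an item, a deadline, or cancellation. Branches that
// leave timer.C unread stop the timer so it is not left running.
func waitForWork(ctx context.Context, delay time.Duration, items <-chan int) (int, waitResult) {
	timer := time.NewTimer(delay)
	select {
	case <-timer.C:
		return 0, timedOut
	case v := <-items:
		timer.Stop()
		return v, gotItem
	case <-ctx.Done():
		timer.Stop()
		return 0, cancelled
	}
}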

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"github.com/emirpasic/gods/maps/linkedhashmap"
 	"mayfly-go/pkg/logx"
+	"mayfly-go/pkg/utils/timex"
 	"sync"
 	"time"
 )

@@ -32,7 +33,7 @@ type Job interface {
 	Runnable(next NextFunc) bool
 	GetDeadline() time.Time
 	Schedule() bool
-	Renew(job Job)
+	Update(job Job)
 }

 type iterator[T Job] struct {

@@ -138,6 +139,7 @@ func NewRunner[T Job](maxRunning int) *Runner[T] {
 	}
 	go func() {
 		defer runner.wg.Done()
+		timex.SleepWithContext(runner.context, time.Second*10)
 		for runner.context.Err() == nil {
 			job, ok := runner.delayQueue.Dequeue(ctx)
 			if !ok {

@@ -277,7 +279,7 @@ func (r *Runner[T]) UpdateOrAdd(ctx context.Context, job T) error {
 	defer r.mutex.Unlock()
 	if old, ok := r.all[job.GetKey()]; ok {
-		old.Renew(job)
+		old.Update(job)
 		job = old
 	}
 	r.schedule(ctx, job)

@@ -29,7 +29,7 @@ type testJob struct {
 	deadline time.Time
 }

-func (t *testJob) Renew(job Job) {
+func (t *testJob) Update(_ Job) {
 }

 func (t *testJob) GetDeadline() time.Time {

@@ -82,6 +82,7 @@ func TestRunner_Close(t *testing.T) {
 	}()
 	waiting.Wait()
 	timer := time.NewTimer(time.Microsecond * 10)
+	defer timer.Stop()
 	runner.Close()
 	select {
 	case <-timer.C:

@@ -54,7 +54,10 @@ func (nt *NullTime) MarshalJSON() ([]byte, error) {
 }

 func SleepWithContext(ctx context.Context, d time.Duration) {
-	ctx, cancel := context.WithTimeout(ctx, d)
-	<-ctx.Done()
-	cancel()
+	timer := time.NewTimer(d)
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+	case <-ctx.Done():
+	}
 }
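
The rewritten SleepWithContext keeps the same contract, returning after d or as soon as ctx is cancelled, but uses a plain timer instead of deriving a throwaway context. A short usage sketch, assuming the helper lives in the timex package imported earlier in this diff:

package main

import (
	"context"
	"fmt"
	"time"

	"mayfly-go/pkg/utils/timex"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(100 * time.Millisecond)
		cancel() // simulate a shutdown signal
	}()

	start := time.Now()
	timex.SleepWithContext(ctx, time.Hour) // wakes up as soon as ctx is cancelled
	fmt.Println("slept for", time.Since(start).Round(time.Millisecond))
}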