package application

import (
	"context"
	"errors"
	"fmt"
	"mayfly-go/internal/db/dbm/dbi"
	"mayfly-go/internal/db/domain/entity"
	"mayfly-go/internal/db/domain/repository"
	"mayfly-go/pkg/logx"
	"mayfly-go/pkg/runner"
	"reflect"
	"sync"
	"time"
)

const (
	maxRunning = 8
)

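// dbScheduler coordinates database backup, restore and binlog jobs: it persists job
// definitions through the repositories and drives their execution on a shared runner.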
type dbScheduler struct {
	mutex              sync.Mutex
	runner             *runner.Runner[entity.DbJob]
	dbApp              Db
	backupRepo         repository.DbBackup
	backupHistoryRepo  repository.DbBackupHistory
	restoreRepo        repository.DbRestore
	restoreHistoryRepo repository.DbRestoreHistory
	binlogRepo         repository.DbBinlog
	binlogHistoryRepo  repository.DbBinlogHistory
	binlogTimes        map[uint64]time.Time
}

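// newDbScheduler wires the scheduler to its repositories and creates the underlying
// runner with the schedule and runnable hooks defined below.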
func newDbScheduler(repositories *repository.Repositories) (*dbScheduler, error) {
	scheduler := &dbScheduler{
		dbApp:              dbApp,
		backupRepo:         repositories.Backup,
		backupHistoryRepo:  repositories.BackupHistory,
		restoreRepo:        repositories.Restore,
		restoreHistoryRepo: repositories.RestoreHistory,
		binlogRepo:         repositories.Binlog,
		binlogHistoryRepo:  repositories.BinlogHistory,
	}
	scheduler.runner = runner.NewRunner[entity.DbJob](maxRunning, scheduler.runJob,
		runner.WithScheduleJob[entity.DbJob](scheduler.scheduleJob),
		runner.WithRunnableJob[entity.DbJob](scheduler.runnableJob),
	)
	return scheduler, nil
}

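// scheduleJob returns the next execution time for the given job.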
func (s *dbScheduler) scheduleJob(job entity.DbJob) (time.Time, error) {
	return job.Schedule()
}

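// repo returns the repository that persists jobs of the given type and panics on an
// unknown job type.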
func (s *dbScheduler) repo(typ entity.DbJobType) repository.DbJob {
	switch typ {
	case entity.DbJobTypeBackup:
		return s.backupRepo
	case entity.DbJobTypeRestore:
		return s.restoreRepo
	case entity.DbJobTypeBinlog:
		return s.binlogRepo
	default:
		panic(fmt.Errorf("无效的数据库任务类型: %v", typ))
	}
}

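// UpdateJob persists the modified job and reschedules it in the runner.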
func (s *dbScheduler) UpdateJob(ctx context.Context, job entity.DbJob) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if err := s.repo(job.GetJobType()).UpdateById(ctx, job); err != nil {
		return err
	}
	_ = s.runner.UpdateOrAdd(ctx, job)
	return nil
}

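// Close stops the underlying runner.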
func (s *dbScheduler) Close() {
	s.runner.Close()
}

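// AddJob registers a single job or a slice of jobs with the runner, optionally
// persisting them first when saving is true.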
func (s *dbScheduler) AddJob(ctx context.Context, saving bool, jobType entity.DbJobType, jobs any) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if saving {
		if err := s.repo(jobType).AddJob(ctx, jobs); err != nil {
			return err
		}
	}

	reflectValue := reflect.ValueOf(jobs)
	switch reflectValue.Kind() {
	case reflect.Array, reflect.Slice:
		reflectLen := reflectValue.Len()
		for i := 0; i < reflectLen; i++ {
			job := reflectValue.Index(i).Interface().(entity.DbJob)
			job.SetJobType(jobType)
			_ = s.runner.Add(ctx, job)
		}
	default:
		job := jobs.(entity.DbJob)
		job.SetJobType(jobType)
		_ = s.runner.Add(ctx, job)
	}
	return nil
}

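// RemoveJob deletes the persisted job of the given type and removes it from the runner.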
func (s *dbScheduler) RemoveJob(ctx context.Context, jobType entity.DbJobType, jobId uint64) error {
	// TODO: delete the backup history files that belong to this job
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if err := s.repo(jobType).DeleteById(ctx, jobId); err != nil {
		return err
	}
	_ = s.runner.Remove(ctx, entity.FormatJobKey(jobType, jobId))
	return nil
}

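// EnableJob marks the job as enabled and adds it to the runner; it is a no-op when the
// job is already enabled.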
func (s *dbScheduler) EnableJob(ctx context.Context, jobType entity.DbJobType, jobId uint64) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	repo := s.repo(jobType)
	job := entity.NewDbJob(jobType)
	if err := repo.GetById(job, jobId); err != nil {
		return err
	}
	if job.IsEnabled() {
		return nil
	}
	job.SetEnabled(true)
	if err := repo.UpdateEnabled(ctx, jobId, true); err != nil {
		return err
	}
	_ = s.runner.Add(ctx, job)
	return nil
}

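// DisableJob marks the job as disabled and removes it from the runner; it is a no-op
// when the job is already disabled.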
func (s *dbScheduler) DisableJob(ctx context.Context, jobType entity.DbJobType, jobId uint64) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	repo := s.repo(jobType)
	job := entity.NewDbJob(jobType)
	if err := repo.GetById(job, jobId); err != nil {
		return err
	}
	if !job.IsEnabled() {
		return nil
	}
	if err := repo.UpdateEnabled(ctx, jobId, false); err != nil {
		return err
	}
	_ = s.runner.Remove(ctx, job.GetKey())
	return nil
}

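// StartJobNow triggers an immediate run of an enabled job.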
func (s *dbScheduler) StartJobNow(ctx context.Context, jobType entity.DbJobType, jobId uint64) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	job := entity.NewDbJob(jobType)
	if err := s.repo(jobType).GetById(job, jobId); err != nil {
		return err
	}
	if !job.IsEnabled() {
		return errors.New("任务未启用")
	}
	_ = s.runner.StartNow(ctx, job)
	return nil
}

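// backupMysql runs a MySQL backup job and records the resulting backup history,
// including the binlog position the backup ends at.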
func (s *dbScheduler) backupMysql(ctx context.Context, job entity.DbJob) error {
	id, err := NewIncUUID()
	if err != nil {
		return err
	}
	backup := job.(*entity.DbBackup)
	history := &entity.DbBackupHistory{
		Uuid:         id.String(),
		DbBackupId:   backup.Id,
		DbInstanceId: backup.DbInstanceId,
		DbName:       backup.DbName,
	}
	conn, err := s.dbApp.GetDbConnByInstanceId(backup.DbInstanceId)
	if err != nil {
		return err
	}
	dbProgram := conn.GetDialect().GetDbProgram()
	binlogInfo, err := dbProgram.Backup(ctx, history)
	if err != nil {
		return err
	}
	now := time.Now()
	name := backup.Name
	if len(name) == 0 {
		name = backup.DbName
	}
	history.Name = fmt.Sprintf("%s[%s]", name, now.Format(time.DateTime))
	history.CreateTime = now
	history.BinlogFileName = binlogInfo.FileName
	history.BinlogSequence = binlogInfo.Sequence
	history.BinlogPosition = binlogInfo.Position

	if err := s.backupHistoryRepo.Insert(ctx, history); err != nil {
		return err
	}
	return nil
}

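// restoreMysql runs a MySQL restore job: a point-in-time restore first syncs the binlog
// history and replays binlogs up to the requested time, otherwise the selected backup
// history is restored directly; a restore history record is written on success.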
func (s *dbScheduler) restoreMysql(ctx context.Context, job entity.DbJob) error {
	restore := job.(*entity.DbRestore)
	conn, err := s.dbApp.GetDbConnByInstanceId(restore.DbInstanceId)
	if err != nil {
		return err
	}
	dbProgram := conn.GetDialect().GetDbProgram()
	if restore.PointInTime.Valid {
		latestBinlogSequence, earliestBackupSequence := int64(-1), int64(-1)
		binlogHistory, ok, err := s.binlogHistoryRepo.GetLatestHistory(restore.DbInstanceId)
		if err != nil {
			return err
		}
		if ok {
			latestBinlogSequence = binlogHistory.Sequence
		} else {
			backupHistory, ok, err := s.backupHistoryRepo.GetEarliestHistory(restore.DbInstanceId)
			if err != nil {
				return err
			}
			if !ok {
				return nil
			}
			earliestBackupSequence = backupHistory.BinlogSequence
		}
		binlogFiles, err := dbProgram.FetchBinlogs(ctx, true, earliestBackupSequence, latestBinlogSequence)
		if err != nil {
			return err
		}
		if err := s.binlogHistoryRepo.InsertWithBinlogFiles(ctx, restore.DbInstanceId, binlogFiles); err != nil {
			return err
		}
		if err := s.restorePointInTime(ctx, dbProgram, restore); err != nil {
			return err
		}
	} else {
		if err := s.restoreBackupHistory(ctx, dbProgram, restore); err != nil {
			return err
		}
	}

	history := &entity.DbRestoreHistory{
		CreateTime:  time.Now(),
		DbRestoreId: restore.Id,
	}
	if err := s.restoreHistoryRepo.Insert(ctx, history); err != nil {
		return err
	}
	return nil
}

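// runJob executes a scheduled job, dispatching by job type and persisting the running,
// success or failed status before and after execution.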
func (s *dbScheduler) runJob(ctx context.Context, job entity.DbJob) {
	job.SetLastStatus(entity.DbJobRunning, nil)
	if err := s.repo(job.GetJobType()).UpdateLastStatus(ctx, job); err != nil {
		logx.Errorf("failed to update job status: %v", err)
		return
	}

	var errRun error
	switch typ := job.GetJobType(); typ {
	case entity.DbJobTypeBackup:
		errRun = s.backupMysql(ctx, job)
	case entity.DbJobTypeRestore:
		errRun = s.restoreMysql(ctx, job)
	case entity.DbJobTypeBinlog:
		errRun = s.fetchBinlogMysql(ctx, job)
	default:
		errRun = fmt.Errorf("无效的数据库任务类型: %v", typ)
	}
	status := entity.DbJobSuccess
	if errRun != nil {
		status = entity.DbJobFailed
	}
	job.SetLastStatus(status, errRun)
	if err := s.repo(job.GetJobType()).UpdateLastStatus(ctx, job); err != nil {
		logx.Errorf("failed to update job status: %v", err)
		return
	}
}

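// runnableJob reports whether a job may start now: it limits how many jobs run against
// the same database instance and the same database name, and keeps binlog-related jobs
// from overlapping on one instance.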
func (s *dbScheduler) runnableJob(job entity.DbJob, next runner.NextJobFunc[entity.DbJob]) bool {
	const maxCountByInstanceId = 4
	const maxCountByDbName = 1
	var countByInstanceId, countByDbName int
	jobBase := job.GetJobBase()
	for item, ok := next(); ok; item, ok = next() {
		itemBase := item.GetJobBase()
		if jobBase.DbInstanceId == itemBase.DbInstanceId {
			countByInstanceId++
			if countByInstanceId >= maxCountByInstanceId {
				return false
			}

			if relatedToBinlog(job.GetJobType()) {
				// TODO: trigger a binlog sync before restoring a database; the restore must wait until the binlog sync has finished
				if relatedToBinlog(item.GetJobType()) {
					return false
				}
			}

			if job.GetDbName() == item.GetDbName() {
				countByDbName++
				if countByDbName >= maxCountByDbName {
					return false
				}
			}
		}
	}
	return true
}

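// relatedToBinlog reports whether the job type depends on binlog files.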
func relatedToBinlog(typ entity.DbJobType) bool {
	return typ == entity.DbJobTypeRestore || typ == entity.DbJobTypeBinlog
}

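// restorePointInTime restores the latest backup taken before the requested point in time
// and then replays the binlogs between the backup position and that time.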
func (s *dbScheduler) restorePointInTime(ctx context.Context, program dbi.DbProgram, job *entity.DbRestore) error {
	binlogHistory, err := s.binlogHistoryRepo.GetHistoryByTime(job.DbInstanceId, job.PointInTime.Time)
	if err != nil {
		return err
	}
	position, err := program.GetBinlogEventPositionAtOrAfterTime(ctx, binlogHistory.FileName, job.PointInTime.Time)
	if err != nil {
		return err
	}
	target := &entity.BinlogInfo{
		FileName: binlogHistory.FileName,
		Sequence: binlogHistory.Sequence,
		Position: position,
	}
	backupHistory, err := s.backupHistoryRepo.GetLatestHistory(job.DbInstanceId, job.DbName, target)
	if err != nil {
		return err
	}
	start := &entity.BinlogInfo{
		FileName: backupHistory.BinlogFileName,
		Sequence: backupHistory.BinlogSequence,
		Position: backupHistory.BinlogPosition,
	}
	binlogHistories, err := s.binlogHistoryRepo.GetHistories(job.DbInstanceId, start, target)
	if err != nil {
		return err
	}
	restoreInfo := &dbi.RestoreInfo{
		BackupHistory:   backupHistory,
		BinlogHistories: binlogHistories,
		StartPosition:   backupHistory.BinlogPosition,
		TargetPosition:  target.Position,
		TargetTime:      job.PointInTime.Time,
	}
	if err := program.RestoreBackupHistory(ctx, backupHistory.DbName, backupHistory.DbBackupId, backupHistory.Uuid); err != nil {
		return err
	}
	return program.ReplayBinlog(ctx, job.DbName, job.DbName, restoreInfo)
}

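// restoreBackupHistory restores the backup history record referenced by the restore job.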
func (s *dbScheduler) restoreBackupHistory(ctx context.Context, program dbi.DbProgram, job *entity.DbRestore) error {
	backupHistory := &entity.DbBackupHistory{}
	if err := s.backupHistoryRepo.GetById(backupHistory, job.DbBackupHistoryId); err != nil {
		return err
	}
	return program.RestoreBackupHistory(ctx, backupHistory.DbName, backupHistory.DbBackupId, backupHistory.Uuid)
}

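// fetchBinlogMysql fetches binlog files for the job's instance, starting from the latest
// known binlog history or, if none exists yet, from the earliest backup history, and
// records the fetched files in the binlog history.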
func (s *dbScheduler) fetchBinlogMysql(ctx context.Context, backup entity.DbJob) error {
	instanceId := backup.GetJobBase().DbInstanceId
	latestBinlogSequence, earliestBackupSequence := int64(-1), int64(-1)
	binlogHistory, ok, err := s.binlogHistoryRepo.GetLatestHistory(instanceId)
	if err != nil {
		return err
	}
	if ok {
		latestBinlogSequence = binlogHistory.Sequence
	} else {
		backupHistory, ok, err := s.backupHistoryRepo.GetEarliestHistory(instanceId)
		if err != nil {
			return err
		}
		if !ok {
			return nil
		}
		earliestBackupSequence = backupHistory.BinlogSequence
	}
	conn, err := s.dbApp.GetDbConnByInstanceId(instanceId)
	if err != nil {
		return err
	}
	dbProgram := conn.GetDialect().GetDbProgram()
	binlogFiles, err := dbProgram.FetchBinlogs(ctx, false, earliestBackupSequence, latestBinlogSequence)
	if err == nil {
		err = s.binlogHistoryRepo.InsertWithBinlogFiles(ctx, instanceId, binlogFiles)
	}
	return err
}