Merge branch 'dev' of https://gitee.com/objs/mayfly-go into dev

meilin.huang
2024-02-06 15:17:39 +08:00
51 changed files with 572 additions and 305 deletions


@@ -28,8 +28,11 @@
<el-form-item prop="startTime" label="开始时间"> <el-form-item prop="startTime" label="开始时间">
<el-date-picker v-model="state.form.startTime" type="datetime" placeholder="开始时间" /> <el-date-picker v-model="state.form.startTime" type="datetime" placeholder="开始时间" />
</el-form-item> </el-form-item>
<el-form-item prop="intervalDay" label="备份周期"> <el-form-item prop="intervalDay" label="备份周期(天)">
<el-input v-model.number="state.form.intervalDay" type="number" placeholder="备份周期(单位:天"></el-input> <el-input v-model.number="state.form.intervalDay" type="number" placeholder="单位:天"></el-input>
</el-form-item>
<el-form-item prop="maxSaveDays" label="备份历史保留天数">
<el-input v-model.number="state.form.maxSaveDays" type="number" placeholder="0: 永久保留"></el-input>
</el-form-item> </el-form-item>
</el-form> </el-form>
@@ -92,6 +95,14 @@ const rules = {
trigger: ['change', 'blur'], trigger: ['change', 'blur'],
}, },
], ],
maxSaveDays: [
{
required: true,
pattern: /^[0-9]\d*$/,
message: '请输入非负整数',
trigger: ['change', 'blur'],
},
],
}; };
const backupForm: any = ref(null); const backupForm: any = ref(null);
@@ -102,9 +113,10 @@ const state = reactive({
dbId: 0, dbId: 0,
dbNames: '', dbNames: '',
name: '', name: '',
intervalDay: null, intervalDay: 1,
startTime: null as any, startTime: null as any,
repeated: null as any, repeated: true,
maxSaveDays: 0,
}, },
btnLoading: false, btnLoading: false,
dbNamesSelected: [] as any, dbNamesSelected: [] as any,
@@ -137,12 +149,14 @@ const init = (data: any) => {
state.form.name = data.name; state.form.name = data.name;
state.form.intervalDay = data.intervalDay; state.form.intervalDay = data.intervalDay;
state.form.startTime = data.startTime; state.form.startTime = data.startTime;
state.form.maxSaveDays = data.maxSaveDays;
} else { } else {
state.editOrCreate = false; state.editOrCreate = false;
state.form.name = ''; state.form.name = '';
state.form.intervalDay = null; state.form.intervalDay = 1;
const now = new Date(); const now = new Date();
state.form.startTime = new Date(now.getFullYear(), now.getMonth(), now.getDate() + 1); state.form.startTime = new Date(now.getFullYear(), now.getMonth(), now.getDate() + 1);
state.form.maxSaveDays = 0;
getDbNamesWithoutBackup(); getDbNamesWithoutBackup();
} }
}; };


@@ -25,6 +25,7 @@
<template #action="{ data }"> <template #action="{ data }">
<el-button @click="showInfo(data)" link>详情</el-button> <el-button @click="showInfo(data)" link>详情</el-button>
<el-button v-if="actionBtns[perms.saveInstance]" @click="editInstance(data)" type="primary" link>编辑</el-button> <el-button v-if="actionBtns[perms.saveInstance]" @click="editInstance(data)" type="primary" link>编辑</el-button>
<el-button v-if="actionBtns[perms.delInstance]" @click="deleteInstance(data)" type="primary" link>删除</el-button>
</template> </template>
</page-table> </page-table>
@@ -91,7 +92,7 @@ const columns = ref([
]); ]);
// 该用户拥有的的操作列按钮权限 // 该用户拥有的的操作列按钮权限
const actionBtns = hasPerms([perms.saveInstance]); const actionBtns = hasPerms(Object.values(perms));
const actionColumn = TableColumn.new('action', '操作').isSlot().setMinWidth(110).fixedRight().alignCenter(); const actionColumn = TableColumn.new('action', '操作').isSlot().setMinWidth(110).fixedRight().alignCenter();
const pageTableRef: Ref<any> = ref(null); const pageTableRef: Ref<any> = ref(null);
@@ -150,14 +151,26 @@ const editInstance = async (data: any) => {
state.instanceEditDialog.visible = true; state.instanceEditDialog.visible = true;
}; };
const deleteInstance = async () => { const deleteInstance = async (data: any) => {
try { try {
await ElMessageBox.confirm(`确定删除数据库实例【${state.selectionData.map((x: any) => x.name).join(', ')}】?`, '提示', { let instanceName: string;
if (data) {
instanceName = data.name;
} else {
instanceName = state.selectionData.map((x: any) => x.name).join(', ');
}
await ElMessageBox.confirm(`确定删除数据库实例【${instanceName}】?`, '提示', {
confirmButtonText: '确定', confirmButtonText: '确定',
cancelButtonText: '取消', cancelButtonText: '取消',
type: 'warning', type: 'warning',
}); });
await dbApi.deleteInstance.request({ id: state.selectionData.map((x: any) => x.id).join(',') }); let instanceId: string;
if (data) {
instanceId = data.id;
} else {
instanceId = state.selectionData.map((x: any) => x.id).join(',');
}
await dbApi.deleteInstance.request({ id: instanceId });
ElMessage.success('删除成功'); ElMessage.success('删除成功');
search(); search();
} catch (err) { } catch (err) {


@@ -38,7 +38,6 @@ require (
// gorm // gorm
gorm.io/driver/mysql v1.5.2 gorm.io/driver/mysql v1.5.2
gorm.io/gorm v1.25.6 gorm.io/gorm v1.25.6
) )
require ( require (


@@ -78,8 +78,6 @@ func (d *Db) DeleteDb(rc *req.Ctx) {
d.DbApp.Delete(ctx, dbId) d.DbApp.Delete(ctx, dbId)
// 删除该库的sql执行记录 // 删除该库的sql执行记录
d.DbSqlExecApp.DeleteBy(ctx, &entity.DbSqlExec{DbId: dbId}) d.DbSqlExecApp.DeleteBy(ctx, &entity.DbSqlExec{DbId: dbId})
// todo delete restore task and histories
} }
} }


@@ -81,6 +81,7 @@ func (d *DbBackup) Update(rc *req.Ctx) {
job.Name = backupForm.Name job.Name = backupForm.Name
job.StartTime = backupForm.StartTime job.StartTime = backupForm.StartTime
job.Interval = backupForm.Interval job.Interval = backupForm.Interval
job.MaxSaveDays = backupForm.MaxSaveDays
biz.ErrIsNilAppendErr(d.backupApp.Update(rc.MetaCtx, job), "保存数据库备份任务失败: %v") biz.ErrIsNilAppendErr(d.backupApp.Update(rc.MetaCtx, job), "保存数据库备份任务失败: %v")
} }
@@ -178,7 +179,7 @@ func (d *DbBackup) GetHistoryPageList(rc *req.Ctx) {
rc.ResData = res rc.ResData = res
} }
// RestoreHistories 删除数据库备份历史 // RestoreHistories 数据库备份历史中恢复数据库
// @router /api/dbs/:dbId/backup-histories/:backupHistoryId/restore [POST] // @router /api/dbs/:dbId/backup-histories/:backupHistoryId/restore [POST]
func (d *DbBackup) RestoreHistories(rc *req.Ctx) { func (d *DbBackup) RestoreHistories(rc *req.Ctx) {
pm := ginx.PathParam(rc.GinCtx, "backupHistoryId") pm := ginx.PathParam(rc.GinCtx, "backupHistoryId")


@@ -87,16 +87,10 @@ func (d *Instance) DeleteInstance(rc *req.Ctx) {
for _, v := range ids { for _, v := range ids {
value, err := strconv.Atoi(v) value, err := strconv.Atoi(v)
biz.ErrIsNilAppendErr(err, "string类型转换为int异常: %s") biz.ErrIsNilAppendErr(err, "删除数据库实例失败: %s")
instanceId := uint64(value) instanceId := uint64(value)
if d.DbApp.Count(&entity.DbQuery{InstanceId: instanceId}) != 0 { err = d.InstanceApp.Delete(rc.MetaCtx, instanceId)
instance, err := d.InstanceApp.GetById(new(entity.DbInstance), instanceId, "name") biz.ErrIsNilAppendErr(err, "删除数据库实例失败: %s")
biz.ErrIsNil(err, "获取数据库实例错误数据库实例ID为: %d", instance.Id)
biz.IsTrue(false, "不能删除数据库实例【%s】请先删除其关联的数据库资源。", instance.Name)
}
// todo check if backup task has been disabled and backup histories have been deleted
d.InstanceApp.Delete(rc.MetaCtx, instanceId)
} }
} }


@@ -14,6 +14,7 @@ type DbBackupForm struct {
Interval time.Duration `json:"-"` // 间隔时间: 为零表示单次执行,为正表示反复执行 Interval time.Duration `json:"-"` // 间隔时间: 为零表示单次执行,为正表示反复执行
IntervalDay uint64 `json:"intervalDay"` // 间隔天数: 为零表示单次执行,为正表示反复执行 IntervalDay uint64 `json:"intervalDay"` // 间隔天数: 为零表示单次执行,为正表示反复执行
Repeated bool `json:"repeated"` // 是否重复执行 Repeated bool `json:"repeated"` // 是否重复执行
MaxSaveDays int `json:"maxSaveDays"` // 数据库备份历史保留天数,过期将自动删除
} }
func (restore *DbBackupForm) UnmarshalJSON(data []byte) error { func (restore *DbBackupForm) UnmarshalJSON(data []byte) error {
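With the frontend change above, the create/update payload now carries maxSaveDays next to intervalDay and repeated, and DbBackupForm gains the matching field. A minimal, stdlib-only sketch of how such a payload decodes; the struct below is illustrative rather than the project's actual DbBackupForm, and the "name" tag is an assumption:

package main

import (
    "encoding/json"
    "fmt"
)

// Illustrative subset of the backup form fields visible in this diff.
type backupForm struct {
    Name        string `json:"name"`
    IntervalDay uint64 `json:"intervalDay"` // 0 = run once, >0 = repeat every N days
    Repeated    bool   `json:"repeated"`
    MaxSaveDays int    `json:"maxSaveDays"` // 0 = keep backup histories forever
}

func main() {
    payload := []byte(`{"name":"nightly","intervalDay":1,"repeated":true,"maxSaveDays":30}`)
    var f backupForm
    if err := json.Unmarshal(payload, &f); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", f)
}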


@@ -15,6 +15,7 @@ type DbBackup struct {
StartTime time.Time `json:"startTime"` // 开始时间 StartTime time.Time `json:"startTime"` // 开始时间
Interval time.Duration `json:"-"` // 间隔时间 Interval time.Duration `json:"-"` // 间隔时间
IntervalDay uint64 `json:"intervalDay" gorm:"-"` // 间隔天数 IntervalDay uint64 `json:"intervalDay" gorm:"-"` // 间隔天数
MaxSaveDays int `json:"maxSaveDays"` // 数据库备份历史保留天数,过期将自动删除
Enabled bool `json:"enabled"` // 是否启用 Enabled bool `json:"enabled"` // 是否启用
EnabledDesc string `json:"enabledDesc"` // 启用状态描述 EnabledDesc string `json:"enabledDesc"` // 启用状态描述
LastTime timex.NullTime `json:"lastTime"` // 最近一次执行时间 LastTime timex.NullTime `json:"lastTime"` // 最近一次执行时间
@@ -29,9 +30,9 @@ func (backup *DbBackup) MarshalJSON() ([]byte, error) {
backup.IntervalDay = uint64(backup.Interval / time.Hour / 24) backup.IntervalDay = uint64(backup.Interval / time.Hour / 24)
if len(backup.EnabledDesc) == 0 { if len(backup.EnabledDesc) == 0 {
if backup.Enabled { if backup.Enabled {
backup.EnabledDesc = "任务已启用" backup.EnabledDesc = "已启用"
} else { } else {
backup.EnabledDesc = "任务已禁用" backup.EnabledDesc = "已禁用"
} }
} }
return json.Marshal((*dbBackup)(backup)) return json.Marshal((*dbBackup)(backup))
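MarshalJSON derives the intervalDay shown to the frontend from the stored time.Duration. A quick stdlib sketch of that conversion and its inverse, which the form side would apply when turning intervalDay back into Interval:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Duration -> days, as done in MarshalJSON above.
    interval := 72 * time.Hour
    days := uint64(interval / time.Hour / 24)
    fmt.Println(days) // 3

    // Days -> duration, the inverse conversion a form handler would apply.
    intervalDay := uint64(3)
    fmt.Println(time.Duration(intervalDay) * 24 * time.Hour) // 72h0m0s
}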


@@ -30,9 +30,9 @@ func (restore *DbRestore) MarshalJSON() ([]byte, error) {
restore.IntervalDay = uint64(restore.Interval / time.Hour / 24) restore.IntervalDay = uint64(restore.Interval / time.Hour / 24)
if len(restore.EnabledDesc) == 0 { if len(restore.EnabledDesc) == 0 {
if restore.Enabled { if restore.Enabled {
restore.EnabledDesc = "任务已启用" restore.EnabledDesc = "已启用"
} else { } else {
restore.EnabledDesc = "任务已禁用" restore.EnabledDesc = "已禁用"
} }
} }
return json.Marshal((*dbBackup)(restore)) return json.Marshal((*dbBackup)(restore))


@@ -25,10 +25,13 @@ func InitIoc() {
func Init() { func Init() {
sync.OnceFunc(func() { sync.OnceFunc(func() {
if err := GetDbBackupApp().Init(); err != nil { if err := GetDbBackupApp().Init(); err != nil {
panic(fmt.Sprintf("初始化 dbBackupApp 失败: %v", err)) panic(fmt.Sprintf("初始化 DbBackupApp 失败: %v", err))
} }
if err := GetDbRestoreApp().Init(); err != nil { if err := GetDbRestoreApp().Init(); err != nil {
panic(fmt.Sprintf("初始化 dbRestoreApp 失败: %v", err)) panic(fmt.Sprintf("初始化 DbRestoreApp 失败: %v", err))
}
if err := GetDbBinlogApp().Init(); err != nil {
panic(fmt.Sprintf("初始化 DbBinlogApp 失败: %v", err))
} }
GetDataSyncTaskApp().InitCronJob() GetDataSyncTaskApp().InitCronJob()
})() })()
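Init now also brings up DbBinlogApp, and any initializer error panics at startup. A self-contained sketch of the sync.OnceFunc pattern behind it (Go 1.21+); the initializer here is a placeholder:

package main

import (
    "fmt"
    "sync"
)

func initBackup() error { return nil } // placeholder initializer

// initOnce runs the initializers exactly once; a failure panics immediately.
var initOnce = sync.OnceFunc(func() {
    if err := initBackup(); err != nil {
        panic(fmt.Sprintf("初始化 DbBackupApp 失败: %v", err))
    }
})

func main() {
    initOnce() // runs the initializers
    initOnce() // no-op on later calls
}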


@@ -6,15 +6,24 @@ import (
"errors" "errors"
"fmt" "fmt"
"gorm.io/gorm" "gorm.io/gorm"
"math"
"mayfly-go/internal/db/domain/entity" "mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository" "mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/logx" "mayfly-go/pkg/logx"
"mayfly-go/pkg/model" "mayfly-go/pkg/model"
"mayfly-go/pkg/utils/timex"
"sync" "sync"
"time"
"github.com/google/uuid" "github.com/google/uuid"
) )
const maxBackupHistoryDays = 30
var (
errRestoringBackupHistory = errors.New("正在从备份历史中恢复数据库")
)
type DbBackupApp struct { type DbBackupApp struct {
scheduler *dbScheduler `inject:"DbScheduler"` scheduler *dbScheduler `inject:"DbScheduler"`
backupRepo repository.DbBackup `inject:"DbBackupRepo"` backupRepo repository.DbBackup `inject:"DbBackupRepo"`
@@ -22,6 +31,10 @@ type DbBackupApp struct {
restoreRepo repository.DbRestore `inject:"DbRestoreRepo"` restoreRepo repository.DbRestore `inject:"DbRestoreRepo"`
dbApp Db `inject:"DbApp"` dbApp Db `inject:"DbApp"`
mutex sync.Mutex mutex sync.Mutex
closed chan struct{}
wg sync.WaitGroup
ctx context.Context
cancel context.CancelFunc
} }
func (app *DbBackupApp) Init() error { func (app *DbBackupApp) Init() error {
@@ -32,11 +45,68 @@ func (app *DbBackupApp) Init() error {
if err := app.scheduler.AddJob(context.Background(), jobs); err != nil { if err := app.scheduler.AddJob(context.Background(), jobs); err != nil {
return err return err
} }
app.ctx, app.cancel = context.WithCancel(context.Background())
app.wg.Add(1)
go func() {
defer app.wg.Done()
for app.ctx.Err() == nil {
if err := app.prune(app.ctx); err != nil {
logx.Errorf("清理数据库备份历史失败: %s", err.Error())
timex.SleepWithContext(app.ctx, time.Minute*15)
continue
}
timex.SleepWithContext(app.ctx, time.Hour*24)
}
}()
return nil
}
func (app *DbBackupApp) prune(ctx context.Context) error {
var jobs []*entity.DbBackup
if err := app.backupRepo.ListByCond(map[string]any{}, &jobs); err != nil {
return err
}
for _, job := range jobs {
if ctx.Err() != nil {
return nil
}
var histories []*entity.DbBackupHistory
historyCond := map[string]any{
"db_backup_id": job.Id,
}
if err := app.backupHistoryRepo.ListByCondOrder(historyCond, &histories, "id"); err != nil {
return err
}
expiringTime := time.Now().Add(-math.MaxInt64)
if job.MaxSaveDays > 0 {
expiringTime = time.Now().Add(-time.Hour * 24 * time.Duration(job.MaxSaveDays+1))
}
for _, history := range histories {
if ctx.Err() != nil {
return nil
}
if history.CreateTime.After(expiringTime) {
break
}
err := app.DeleteHistory(ctx, history.Id)
if errors.Is(err, errRestoringBackupHistory) {
break
}
if err != nil {
return err
}
}
}
return nil return nil
} }
func (app *DbBackupApp) Close() { func (app *DbBackupApp) Close() {
app.scheduler.Close() app.scheduler.Close()
if app.cancel != nil {
app.cancel()
app.cancel = nil
}
app.wg.Wait()
} }
func (app *DbBackupApp) Create(ctx context.Context, jobs []*entity.DbBackup) error { func (app *DbBackupApp) Create(ctx context.Context, jobs []*entity.DbBackup) error {
@@ -61,7 +131,6 @@ func (app *DbBackupApp) Update(ctx context.Context, job *entity.DbBackup) error
} }
func (app *DbBackupApp) Delete(ctx context.Context, jobId uint64) error { func (app *DbBackupApp) Delete(ctx context.Context, jobId uint64) error {
// todo: 删除数据库备份历史文件
app.mutex.Lock() app.mutex.Lock()
defer app.mutex.Unlock() defer app.mutex.Unlock()
@@ -76,7 +145,7 @@ func (app *DbBackupApp) Delete(ctx context.Context, jobId uint64) error {
default: default:
return err return err
case err == nil: case err == nil:
return fmt.Errorf("数据库备份存在历史记录【%s】无法删除该任务", history.Name) return fmt.Errorf("请先删除关联的数据库备份历史【%s】", history.Name)
case errors.Is(err, gorm.ErrRecordNotFound): case errors.Is(err, gorm.ErrRecordNotFound):
} }
if err := app.backupRepo.DeleteById(ctx, jobId); err != nil { if err := app.backupRepo.DeleteById(ctx, jobId); err != nil {
@@ -184,27 +253,18 @@ func NewIncUUID() (uuid.UUID, error) {
} }
func (app *DbBackupApp) DeleteHistory(ctx context.Context, historyId uint64) (retErr error) { func (app *DbBackupApp) DeleteHistory(ctx context.Context, historyId uint64) (retErr error) {
// todo: 删除数据库备份历史文件
app.mutex.Lock() app.mutex.Lock()
defer app.mutex.Unlock() defer app.mutex.Unlock()
if _, err := app.backupHistoryRepo.UpdateDeleting(false, historyId); err != nil {
return err
}
ok, err := app.backupHistoryRepo.UpdateDeleting(true, historyId) ok, err := app.backupHistoryRepo.UpdateDeleting(true, historyId)
if err != nil { if err != nil {
return err return err
} }
defer func() {
_, err = app.backupHistoryRepo.UpdateDeleting(false, historyId)
if err == nil {
return
}
if retErr == nil {
retErr = err
return
}
retErr = fmt.Errorf("%w, %w", retErr, err)
}()
if !ok { if !ok {
return errors.New("正在从备份历史中恢复数据库") return errRestoringBackupHistory
} }
job := &entity.DbBackupHistory{} job := &entity.DbBackupHistory{}
if err := app.backupHistoryRepo.GetById(job, historyId); err != nil { if err := app.backupHistoryRepo.GetById(job, historyId); err != nil {
@@ -214,7 +274,10 @@ func (app *DbBackupApp) DeleteHistory(ctx context.Context, historyId uint64) (re
if err != nil { if err != nil {
return err return err
} }
dbProgram := conn.GetDialect().GetDbProgram() dbProgram, err := conn.GetDialect().GetDbProgram()
if err != nil {
return err
}
if err := dbProgram.RemoveBackupHistory(ctx, job.DbBackupId, job.Uuid); err != nil { if err := dbProgram.RemoveBackupHistory(ctx, job.DbBackupId, job.Uuid); err != nil {
return err return err
} }
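The new prune goroutine runs roughly once a day and deletes backup histories older than MaxSaveDays, where 0 means keep forever (matching the form placeholder "0: 永久保留"). A stdlib-only sketch of the cutoff selection over an in-memory slice, assuming histories arrive ordered oldest first as in the query above:

package main

import (
    "fmt"
    "time"
)

type history struct {
    ID         uint64
    CreateTime time.Time
}

// expired returns the histories the prune loop would delete:
// everything created before now - (maxSaveDays+1) days.
// maxSaveDays <= 0 means "keep forever".
func expired(histories []history, maxSaveDays int, now time.Time) []history {
    if maxSaveDays <= 0 {
        return nil
    }
    cutoff := now.Add(-time.Hour * 24 * time.Duration(maxSaveDays+1))
    var out []history
    for _, h := range histories { // ordered oldest first
        if h.CreateTime.After(cutoff) {
            break
        }
        out = append(out, h)
    }
    return out
}

func main() {
    now := time.Now()
    hs := []history{
        {1, now.AddDate(0, 0, -40)},
        {2, now.AddDate(0, 0, -10)},
        {3, now.AddDate(0, 0, -1)},
    }
    fmt.Println(len(expired(hs, 30, now))) // 1: only the 40-day-old history
}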


@@ -2,6 +2,7 @@ package application
import ( import (
"context" "context"
"math"
"mayfly-go/internal/db/domain/entity" "mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository" "mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/logx" "mayfly-go/pkg/logx"
@@ -11,9 +12,13 @@ import (
) )
type DbBinlogApp struct { type DbBinlogApp struct {
scheduler *dbScheduler `inject:"DbScheduler"` scheduler *dbScheduler `inject:"DbScheduler"`
binlogRepo repository.DbBinlog `inject:"DbBinlogRepo"` binlogRepo repository.DbBinlog `inject:"DbBinlogRepo"`
backupRepo repository.DbBackup `inject:"DbBackupRepo"` binlogHistoryRepo repository.DbBinlogHistory `inject:"DbBinlogHistoryRepo"`
backupRepo repository.DbBackup `inject:"DbBackupRepo"`
backupHistoryRepo repository.DbBackupHistory `inject:"DbBackupHistoryRepo"`
instanceRepo repository.Instance `inject:"DbInstanceRepo"`
dbApp Db `inject:"DbApp"`
context context.Context context context.Context
cancel context.CancelFunc cancel context.CancelFunc
@@ -26,41 +31,113 @@ func newDbBinlogApp() *DbBinlogApp {
context: ctx, context: ctx,
cancel: cancel, cancel: cancel,
} }
svc.waitGroup.Add(1)
go svc.run()
return svc return svc
} }
func (app *DbBinlogApp) Init() error {
app.context, app.cancel = context.WithCancel(context.Background())
app.waitGroup.Add(1)
go app.run()
return nil
}
func (app *DbBinlogApp) run() { func (app *DbBinlogApp) run() {
defer app.waitGroup.Done() defer app.waitGroup.Done()
// todo: 实现 binlog 并发下载 for app.context.Err() == nil {
timex.SleepWithContext(app.context, time.Minute) if err := app.fetchBinlog(app.context); err != nil {
for !app.closed() {
jobs, err := app.loadJobs()
if err != nil {
logx.Errorf("DbBinlogApp: 加载 BINLOG 同步任务失败: %s", err.Error())
timex.SleepWithContext(app.context, time.Minute) timex.SleepWithContext(app.context, time.Minute)
continue continue
} }
if app.closed() { if err := app.pruneBinlog(app.context); err != nil {
break timex.SleepWithContext(app.context, time.Minute)
} continue
if err := app.scheduler.AddJob(app.context, jobs); err != nil {
logx.Error("DbBinlogApp: 添加 BINLOG 同步任务失败: ", err.Error())
} }
timex.SleepWithContext(app.context, entity.BinlogDownloadInterval) timex.SleepWithContext(app.context, entity.BinlogDownloadInterval)
} }
} }
func (app *DbBinlogApp) loadJobs() ([]*entity.DbBinlog, error) { func (app *DbBinlogApp) fetchBinlog(ctx context.Context) error {
jobs, err := app.loadJobs(ctx)
if err != nil {
logx.Errorf("DbBinlogApp: 加载 BINLOG 同步任务失败: %s", err.Error())
timex.SleepWithContext(app.context, time.Minute)
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
if err := app.scheduler.AddJob(app.context, jobs); err != nil {
logx.Error("DbBinlogApp: 添加 BINLOG 同步任务失败: ", err.Error())
return err
}
return nil
}
func (app *DbBinlogApp) pruneBinlog(ctx context.Context) error {
var jobs []*entity.DbBinlog
if err := app.binlogRepo.ListByCond(map[string]any{}, &jobs); err != nil {
logx.Error("DbBinlogApp: 获取 BINLOG 同步任务失败: ", err.Error())
return err
}
for _, instance := range jobs {
if ctx.Err() != nil {
return ctx.Err()
}
var histories []*entity.DbBinlogHistory
backupHistory, backupHistoryExists, err := app.backupHistoryRepo.GetEarliestHistoryForBinlog(instance.Id)
if err != nil {
logx.Errorf("DbBinlogApp: 获取数据库备份历史失败: %s", err.Error())
return err
}
var binlogSeq int64 = math.MaxInt64
if backupHistoryExists {
binlogSeq = backupHistory.BinlogSequence
}
if err := app.binlogHistoryRepo.GetHistoriesBeforeSequence(ctx, instance.Id, binlogSeq, &histories); err != nil {
logx.Errorf("DbBinlogApp: 获取数据库 BINLOG 历史失败: %s", err.Error())
return err
}
conn, err := app.dbApp.GetDbConnByInstanceId(instance.Id)
if err != nil {
logx.Errorf("DbBinlogApp: 创建数据库连接失败: %s", err.Error())
return err
}
dbProgram, err := conn.GetDialect().GetDbProgram()
if err != nil {
logx.Errorf("DbBinlogApp: 获取数据库备份与恢复程序失败: %s", err.Error())
return err
}
for i, history := range histories {
// todo: 在避免并发访问的前提下删除本地最新的 BINLOG 文件
if !backupHistoryExists && i == len(histories)-1 {
// 暂不删除本地最新的 BINLOG 文件
break
}
if ctx.Err() != nil {
return ctx.Err()
}
if err := dbProgram.PruneBinlog(history); err != nil {
logx.Errorf("清理 BINLOG 文件失败: %v", err)
continue
}
if err := app.binlogHistoryRepo.DeleteById(ctx, history.Id); err != nil {
logx.Errorf("删除 BINLOG 历史失败: %v", err)
continue
}
}
}
return nil
}
func (app *DbBinlogApp) loadJobs(ctx context.Context) ([]*entity.DbBinlog, error) {
var instanceIds []uint64 var instanceIds []uint64
if err := app.backupRepo.ListDbInstances(true, true, &instanceIds); err != nil { if err := app.backupRepo.ListDbInstances(true, true, &instanceIds); err != nil {
return nil, err return nil, err
} }
jobs := make([]*entity.DbBinlog, 0, len(instanceIds)) jobs := make([]*entity.DbBinlog, 0, len(instanceIds))
for _, id := range instanceIds { for _, id := range instanceIds {
if app.closed() { if ctx.Err() != nil {
break break
} }
binlog := entity.NewDbBinlog(id) binlog := entity.NewDbBinlog(id)
@@ -73,14 +150,15 @@ func (app *DbBinlogApp) loadJobs() ([]*entity.DbBinlog, error) {
} }
func (app *DbBinlogApp) Close() { func (app *DbBinlogApp) Close() {
app.cancel() cancel := app.cancel
if cancel == nil {
return
}
app.cancel = nil
cancel()
app.waitGroup.Wait() app.waitGroup.Wait()
} }
func (app *DbBinlogApp) closed() bool {
return app.context.Err() != nil
}
func (app *DbBinlogApp) AddJobIfNotExists(ctx context.Context, job *entity.DbBinlog) error { func (app *DbBinlogApp) AddJobIfNotExists(ctx context.Context, job *entity.DbBinlog) error {
if err := app.binlogRepo.AddJobIfNotExists(ctx, job); err != nil { if err := app.binlogRepo.AddJobIfNotExists(ctx, job); err != nil {
return err return err
@@ -90,11 +168,3 @@ func (app *DbBinlogApp) AddJobIfNotExists(ctx context.Context, job *entity.DbBin
} }
return nil return nil
} }
func (app *DbBinlogApp) Delete(ctx context.Context, jobId uint64) error {
// todo: 删除 Binlog 历史文件
if err := app.binlogRepo.DeleteById(ctx, jobId); err != nil {
return err
}
return nil
}
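pruneBinlog only keeps binlog files still needed for point-in-time restore: anything whose sequence precedes the earliest backup history's binlog sequence can be removed, and the newest local file is kept while no backup history exists yet. A stdlib-only sketch of that selection, with the struct and ordering assumed from the entities in this diff:

package main

import (
    "fmt"
    "math"
)

type binlogHistory struct {
    ID       uint64
    Sequence int64
}

// prunable mirrors the selection in pruneBinlog: histories whose sequence
// precedes the earliest backup's binlog sequence are safe to delete.
// When no backup history exists, the latest local file is kept.
func prunable(histories []binlogHistory, earliestBackupSeq int64, backupExists bool) []binlogHistory {
    limit := earliestBackupSeq
    if !backupExists {
        limit = math.MaxInt64
    }
    var out []binlogHistory
    for i, h := range histories { // ordered by sequence, oldest first
        if !backupExists && i == len(histories)-1 {
            break // 暂不删除本地最新的 BINLOG 文件
        }
        if h.Sequence >= limit {
            break
        }
        out = append(out, h)
    }
    return out
}

func main() {
    hs := []binlogHistory{{1, 100}, {2, 101}, {3, 102}}
    fmt.Println(len(prunable(hs, 102, true))) // 2: sequences 100 and 101
}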


@@ -2,11 +2,14 @@ package application
import ( import (
"context" "context"
"errors"
"gorm.io/gorm"
"mayfly-go/internal/db/dbm" "mayfly-go/internal/db/dbm"
"mayfly-go/internal/db/dbm/dbi" "mayfly-go/internal/db/dbm/dbi"
"mayfly-go/internal/db/domain/entity" "mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository" "mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/base" "mayfly-go/pkg/base"
"mayfly-go/pkg/biz"
"mayfly-go/pkg/errorx" "mayfly-go/pkg/errorx"
"mayfly-go/pkg/model" "mayfly-go/pkg/model"
) )
@@ -32,6 +35,10 @@ type Instance interface {
type instanceAppImpl struct { type instanceAppImpl struct {
base.AppImpl[*entity.DbInstance, repository.Instance] base.AppImpl[*entity.DbInstance, repository.Instance]
dbApp Db `inject:"DbApp"`
backupApp *DbBackupApp `inject:"DbBackupApp"`
restoreApp *DbRestoreApp `inject:"DbRestoreApp"`
} }
// 注入DbInstanceRepo // 注入DbInstanceRepo
@@ -96,8 +103,50 @@ func (app *instanceAppImpl) Save(ctx context.Context, instanceEntity *entity.DbI
return app.UpdateById(ctx, instanceEntity) return app.UpdateById(ctx, instanceEntity)
} }
func (app *instanceAppImpl) Delete(ctx context.Context, id uint64) error { func (app *instanceAppImpl) Delete(ctx context.Context, instanceId uint64) error {
return app.DeleteById(ctx, id) instance, err := app.GetById(new(entity.DbInstance), instanceId, "name")
biz.ErrIsNil(err, "获取数据库实例错误数据库实例ID为: %d", instance.Id)
restore := &entity.DbRestore{
DbInstanceId: instanceId,
}
err = app.restoreApp.restoreRepo.GetBy(restore)
switch {
case err == nil:
biz.ErrNotNil(err, "不能删除数据库实例【%s】请先删除关联的数据库恢复任务。", instance.Name)
case errors.Is(err, gorm.ErrRecordNotFound):
break
default:
biz.ErrIsNil(err, "删除数据库实例失败: %v", err)
}
backup := &entity.DbBackup{
DbInstanceId: instanceId,
}
err = app.backupApp.backupRepo.GetBy(backup)
switch {
case err == nil:
biz.ErrNotNil(err, "不能删除数据库实例【%s】请先删除关联的数据库备份任务。", instance.Name)
case errors.Is(err, gorm.ErrRecordNotFound):
break
default:
biz.ErrIsNil(err, "删除数据库实例失败: %v", err)
}
db := &entity.Db{
InstanceId: instanceId,
}
err = app.dbApp.GetBy(db)
switch {
case err == nil:
biz.ErrNotNil(err, "不能删除数据库实例【%s】请先删除关联的数据库资源。", instance.Name)
case errors.Is(err, gorm.ErrRecordNotFound):
break
default:
biz.ErrIsNil(err, "删除数据库实例失败: %v", err)
}
return app.DeleteById(ctx, instanceId)
} }
func (app *instanceAppImpl) GetDatabases(ed *entity.DbInstance) ([]string, error) { func (app *instanceAppImpl) GetDatabases(ed *entity.DbInstance) ([]string, error) {
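instanceAppImpl.Delete now refuses to remove an instance while restore tasks, backup tasks, or databases still reference it; only a gorm.ErrRecordNotFound from the lookup lets deletion proceed. A condensed sketch of that guard, with the repository lookups abstracted into a hypothetical findBy function:

package main

import (
    "errors"
    "fmt"

    "gorm.io/gorm"
)

// findBy stands in for the repository lookups (restoreRepo.GetBy,
// backupRepo.GetBy, dbApp.GetBy); it returns gorm.ErrRecordNotFound
// when no dependent record references the instance.
type findBy func(instanceId uint64) error

// checkNoDependents mirrors the guard in instanceAppImpl.Delete: any hit
// blocks deletion, only "record not found" lets it proceed.
func checkNoDependents(instanceId uint64, name string, lookups map[string]findBy) error {
    for what, find := range lookups {
        err := find(instanceId)
        switch {
        case err == nil:
            return fmt.Errorf("不能删除数据库实例【%s】,请先删除关联的%s", name, what)
        case errors.Is(err, gorm.ErrRecordNotFound):
            continue
        default:
            return fmt.Errorf("删除数据库实例失败: %w", err)
        }
    }
    return nil
}

func main() {
    lookups := map[string]findBy{
        "数据库备份任务": func(uint64) error { return gorm.ErrRecordNotFound },
        "数据库资源":   func(uint64) error { return nil }, // a db still references the instance
    }
    fmt.Println(checkNoDependents(1, "test-instance", lookups))
}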


@@ -55,7 +55,6 @@ func (app *DbRestoreApp) Update(ctx context.Context, job *entity.DbRestore) erro
} }
func (app *DbRestoreApp) Delete(ctx context.Context, jobId uint64) error { func (app *DbRestoreApp) Delete(ctx context.Context, jobId uint64) error {
// todo: 删除数据库恢复历史文件
app.mutex.Lock() app.mutex.Lock()
defer app.mutex.Unlock() defer app.mutex.Unlock()


@@ -4,12 +4,14 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"golang.org/x/sync/singleflight"
"gorm.io/gorm" "gorm.io/gorm"
"mayfly-go/internal/db/dbm/dbi" "mayfly-go/internal/db/dbm/dbi"
"mayfly-go/internal/db/domain/entity" "mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository" "mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/runner" "mayfly-go/pkg/runner"
"reflect" "reflect"
"strconv"
"sync" "sync"
"time" "time"
) )
@@ -28,6 +30,7 @@ type dbScheduler struct {
restoreHistoryRepo repository.DbRestoreHistory `inject:"DbRestoreHistoryRepo"` restoreHistoryRepo repository.DbRestoreHistory `inject:"DbRestoreHistoryRepo"`
binlogRepo repository.DbBinlog `inject:"DbBinlogRepo"` binlogRepo repository.DbBinlog `inject:"DbBinlogRepo"`
binlogHistoryRepo repository.DbBinlogHistory `inject:"DbBinlogHistoryRepo"` binlogHistoryRepo repository.DbBinlogHistory `inject:"DbBinlogHistoryRepo"`
sfGroup singleflight.Group
} }
func newDbScheduler() *dbScheduler { func newDbScheduler() *dbScheduler {
@@ -76,7 +79,6 @@ func (s *dbScheduler) AddJob(ctx context.Context, jobs any) error {
} }
func (s *dbScheduler) RemoveJob(ctx context.Context, jobType entity.DbJobType, jobId uint64) error { func (s *dbScheduler) RemoveJob(ctx context.Context, jobType entity.DbJobType, jobId uint64) error {
// todo: 删除数据库备份历史文件
s.mutex.Lock() s.mutex.Lock()
defer s.mutex.Unlock() defer s.mutex.Unlock()
@@ -110,12 +112,11 @@ func (s *dbScheduler) StartJobNow(ctx context.Context, job entity.DbJob) error {
return nil return nil
} }
func (s *dbScheduler) backup(ctx context.Context, dbProgram dbi.DbProgram, job entity.DbJob) error { func (s *dbScheduler) backup(ctx context.Context, dbProgram dbi.DbProgram, backup *entity.DbBackup) error {
id, err := NewIncUUID() id, err := NewIncUUID()
if err != nil { if err != nil {
return err return err
} }
backup := job.(*entity.DbBackup)
history := &entity.DbBackupHistory{ history := &entity.DbBackupHistory{
Uuid: id.String(), Uuid: id.String(),
DbBackupId: backup.Id, DbBackupId: backup.Id,
@@ -143,45 +144,29 @@ func (s *dbScheduler) backup(ctx context.Context, dbProgram dbi.DbProgram, job e
return nil return nil
} }
func (s *dbScheduler) restore(ctx context.Context, dbProgram dbi.DbProgram, job entity.DbJob) error { func (s *dbScheduler) singleFlightFetchBinlog(ctx context.Context, dbProgram dbi.DbProgram, instanceId uint64, targetTime time.Time) error {
restore := job.(*entity.DbRestore) key := strconv.FormatUint(instanceId, 10)
for ctx.Err() == nil {
c := s.sfGroup.DoChan(key, func() (interface{}, error) {
if err := s.fetchBinlog(ctx, dbProgram, instanceId, true, targetTime); err != nil {
return targetTime, err
}
return targetTime, nil
})
select {
case res := <-c:
if targetTime.Compare(res.Val.(time.Time)) <= 0 {
return res.Err
}
case <-ctx.Done():
}
}
return ctx.Err()
}
func (s *dbScheduler) restore(ctx context.Context, dbProgram dbi.DbProgram, restore *entity.DbRestore) error {
if restore.PointInTime.Valid { if restore.PointInTime.Valid {
//if enabled, err := dbProgram.CheckBinlogEnabled(ctx); err != nil { if err := s.fetchBinlog(ctx, dbProgram, restore.DbInstanceId, true, restore.PointInTime.Time); err != nil {
// return err
//} else if !enabled {
// return errors.New("数据库未启用 BINLOG")
//}
//if enabled, err := dbProgram.CheckBinlogRowFormat(ctx); err != nil {
// return err
//} else if !enabled {
// return errors.New("数据库未启用 BINLOG 行模式")
//}
//
//latestBinlogSequence, earliestBackupSequence := int64(-1), int64(-1)
//binlogHistory, ok, err := s.binlogHistoryRepo.GetLatestHistory(restore.DbInstanceId)
//if err != nil {
// return err
//}
//if ok {
// latestBinlogSequence = binlogHistory.Sequence
//} else {
// backupHistory, ok, err := s.backupHistoryRepo.GetEarliestHistory(restore.DbInstanceId)
// if err != nil {
// return err
// }
// if !ok {
// return nil
// }
// earliestBackupSequence = backupHistory.BinlogSequence
//}
//binlogFiles, err := dbProgram.FetchBinlogs(ctx, true, earliestBackupSequence, latestBinlogSequence)
//if err != nil {
// return err
//}
//if err := s.binlogHistoryRepo.InsertWithBinlogFiles(ctx, restore.DbInstanceId, binlogFiles); err != nil {
// return err
//}
if err := s.fetchBinlog(ctx, dbProgram, job.GetInstanceId(), true); err != nil {
return err return err
} }
if err := s.restorePointInTime(ctx, dbProgram, restore); err != nil { if err := s.restorePointInTime(ctx, dbProgram, restore); err != nil {
@@ -210,102 +195,68 @@ func (s *dbScheduler) restore(ctx context.Context, dbProgram dbi.DbProgram, job
return nil return nil
} }
//func (s *dbScheduler) updateLastStatus(ctx context.Context, job entity.DbJob) error {
// switch typ := job.GetJobType(); typ {
// case entity.DbJobTypeBackup:
// return s.backupRepo.UpdateLastStatus(ctx, job)
// case entity.DbJobTypeRestore:
// return s.restoreRepo.UpdateLastStatus(ctx, job)
// case entity.DbJobTypeBinlog:
// return s.binlogRepo.UpdateLastStatus(ctx, job)
// default:
// panic(fmt.Errorf("无效的数据库任务类型: %v", typ))
// }
//}
func (s *dbScheduler) updateJob(ctx context.Context, job entity.DbJob) error { func (s *dbScheduler) updateJob(ctx context.Context, job entity.DbJob) error {
switch typ := job.GetJobType(); typ { switch t := job.(type) {
case entity.DbJobTypeBackup: case *entity.DbBackup:
return s.backupRepo.UpdateById(ctx, job) return s.backupRepo.UpdateById(ctx, t)
case entity.DbJobTypeRestore: case *entity.DbRestore:
return s.restoreRepo.UpdateById(ctx, job) return s.restoreRepo.UpdateById(ctx, t)
case entity.DbJobTypeBinlog: case *entity.DbBinlog:
return s.binlogRepo.UpdateById(ctx, job) return s.binlogRepo.UpdateById(ctx, t)
default: default:
return fmt.Errorf("无效的数据库任务类型: %v", typ) return fmt.Errorf("无效的数据库任务类型: %T", t)
} }
} }
func (s *dbScheduler) runJob(ctx context.Context, job entity.DbJob) error { func (s *dbScheduler) runJob(ctx context.Context, job entity.DbJob) error {
//job.SetLastStatus(entity.DbJobRunning, nil)
//if err := s.updateLastStatus(ctx, job); err != nil {
// logx.Errorf("failed to update job status: %v", err)
// return
//}
//var errRun error
conn, err := s.dbApp.GetDbConnByInstanceId(job.GetInstanceId()) conn, err := s.dbApp.GetDbConnByInstanceId(job.GetInstanceId())
if err != nil { if err != nil {
return err return err
} }
dbProgram := conn.GetDialect().GetDbProgram() dbProgram, err := conn.GetDialect().GetDbProgram()
switch typ := job.GetJobType(); typ { if err != nil {
case entity.DbJobTypeBackup: return err
return s.backup(ctx, dbProgram, job) }
case entity.DbJobTypeRestore: switch t := job.(type) {
return s.restore(ctx, dbProgram, job) case *entity.DbBackup:
case entity.DbJobTypeBinlog: return s.backup(ctx, dbProgram, t)
return s.fetchBinlog(ctx, dbProgram, job.GetInstanceId(), false) case *entity.DbRestore:
default: return s.restore(ctx, dbProgram, t)
return fmt.Errorf("无效的数据库任务类型: %v", typ) case *entity.DbBinlog:
return s.fetchBinlog(ctx, dbProgram, t.DbInstanceId, false, time.Now())
default:
return fmt.Errorf("无效的数据库任务类型: %T", t)
} }
//status := entity.DbJobSuccess
//if errRun != nil {
// status = entity.DbJobFailed
//}
//job.SetLastStatus(status, errRun)
//if err := s.updateLastStatus(ctx, job); err != nil {
// logx.Errorf("failed to update job status: %v", err)
// return
//}
} }
func (s *dbScheduler) runnableJob(job entity.DbJob, next runner.NextJobFunc[entity.DbJob]) (bool, error) { func (s *dbScheduler) runnableJob(job entity.DbJob, nextRunning runner.NextJobFunc[entity.DbJob]) (bool, error) {
if job.IsExpired() { if job.IsExpired() {
return false, runner.ErrJobExpired return false, runner.ErrJobExpired
} }
const maxCountByInstanceId = 4 const maxCountByInstanceId = 4
const maxCountByDbName = 1 const maxCountByDbName = 1
var countByInstanceId, countByDbName int var countByInstanceId, countByDbName int
for item, ok := next(); ok; item, ok = next() { for item, ok := nextRunning(); ok; item, ok = nextRunning() {
if job.GetInstanceId() == item.GetInstanceId() { if job.GetInstanceId() == item.GetInstanceId() {
countByInstanceId++ countByInstanceId++
if countByInstanceId >= maxCountByInstanceId { if countByInstanceId >= maxCountByInstanceId {
return false, nil return false, nil
} }
if relatedToBinlog(job.GetJobType()) {
// todo: 恢复数据库前触发 BINLOG 同步BINLOG 同步完成后才能恢复数据库
if relatedToBinlog(item.GetJobType()) {
return false, nil
}
}
if job.GetDbName() == item.GetDbName() { if job.GetDbName() == item.GetDbName() {
countByDbName++ countByDbName++
if countByDbName >= maxCountByDbName { if countByDbName >= maxCountByDbName {
return false, nil return false, nil
} }
} }
if (job.GetJobType() == entity.DbJobTypeBinlog && item.GetJobType() == entity.DbJobTypeRestore) ||
(job.GetJobType() == entity.DbJobTypeRestore && item.GetJobType() == entity.DbJobTypeBinlog) {
return false, nil
}
} }
} }
return true, nil return true, nil
} }
func relatedToBinlog(typ entity.DbJobType) bool {
return typ == entity.DbJobTypeRestore || typ == entity.DbJobTypeBinlog
}
func (s *dbScheduler) restorePointInTime(ctx context.Context, dbProgram dbi.DbProgram, job *entity.DbRestore) error { func (s *dbScheduler) restorePointInTime(ctx context.Context, dbProgram dbi.DbProgram, job *entity.DbRestore) error {
binlogHistory, err := s.binlogHistoryRepo.GetHistoryByTime(job.DbInstanceId, job.PointInTime.Time) binlogHistory, err := s.binlogHistoryRepo.GetHistoryByTime(job.DbInstanceId, job.PointInTime.Time)
if err != nil { if err != nil {
@@ -320,7 +271,7 @@ func (s *dbScheduler) restorePointInTime(ctx context.Context, dbProgram dbi.DbPr
Sequence: binlogHistory.Sequence, Sequence: binlogHistory.Sequence,
Position: position, Position: position,
} }
backupHistory, err := s.backupHistoryRepo.GetLatestHistory(job.DbInstanceId, job.DbName, target) backupHistory, err := s.backupHistoryRepo.GetLatestHistoryForBinlog(job.DbInstanceId, job.DbName, target)
if err != nil { if err != nil {
return err return err
} }
@@ -364,6 +315,9 @@ func (s *dbScheduler) restorePointInTime(ctx context.Context, dbProgram dbi.DbPr
} }
func (s *dbScheduler) restoreBackupHistory(ctx context.Context, program dbi.DbProgram, backupHistory *entity.DbBackupHistory) (retErr error) { func (s *dbScheduler) restoreBackupHistory(ctx context.Context, program dbi.DbProgram, backupHistory *entity.DbBackupHistory) (retErr error) {
if _, err := s.backupHistoryRepo.UpdateRestoring(false, backupHistory.Id); err != nil {
return err
}
ok, err := s.backupHistoryRepo.UpdateRestoring(true, backupHistory.Id) ok, err := s.backupHistoryRepo.UpdateRestoring(true, backupHistory.Id)
if err != nil { if err != nil {
return err return err
@@ -385,7 +339,7 @@ func (s *dbScheduler) restoreBackupHistory(ctx context.Context, program dbi.DbPr
return program.RestoreBackupHistory(ctx, backupHistory.DbName, backupHistory.DbBackupId, backupHistory.Uuid) return program.RestoreBackupHistory(ctx, backupHistory.DbName, backupHistory.DbBackupId, backupHistory.Uuid)
} }
func (s *dbScheduler) fetchBinlog(ctx context.Context, dbProgram dbi.DbProgram, instanceId uint64, downloadLatestBinlogFile bool) error { func (s *dbScheduler) fetchBinlog(ctx context.Context, dbProgram dbi.DbProgram, instanceId uint64, downloadLatestBinlogFile bool, targetTime time.Time) error {
if enabled, err := dbProgram.CheckBinlogEnabled(ctx); err != nil { if enabled, err := dbProgram.CheckBinlogEnabled(ctx); err != nil {
return err return err
} else if !enabled { } else if !enabled {
@@ -397,15 +351,17 @@ func (s *dbScheduler) fetchBinlog(ctx context.Context, dbProgram dbi.DbProgram,
return errors.New("数据库未启用 BINLOG 行模式") return errors.New("数据库未启用 BINLOG 行模式")
} }
latestBinlogSequence, earliestBackupSequence := int64(-1), int64(-1) earliestBackupSequence := int64(-1)
binlogHistory, ok, err := s.binlogHistoryRepo.GetLatestHistory(instanceId) binlogHistory, ok, err := s.binlogHistoryRepo.GetLatestHistory(instanceId)
if err != nil { if err != nil {
return err return err
} }
if ok { if downloadLatestBinlogFile && targetTime.Before(binlogHistory.LastEventTime) {
latestBinlogSequence = binlogHistory.Sequence return nil
} else { }
backupHistory, ok, err := s.backupHistoryRepo.GetEarliestHistory(instanceId)
if !ok {
backupHistory, ok, err := s.backupHistoryRepo.GetEarliestHistoryForBinlog(instanceId)
if err != nil { if err != nil {
return err return err
} }
@@ -414,7 +370,9 @@ func (s *dbScheduler) fetchBinlog(ctx context.Context, dbProgram dbi.DbProgram,
} }
earliestBackupSequence = backupHistory.BinlogSequence earliestBackupSequence = backupHistory.BinlogSequence
} }
binlogFiles, err := dbProgram.FetchBinlogs(ctx, downloadLatestBinlogFile, earliestBackupSequence, latestBinlogSequence)
// todo: 将循环从 dbProgram.FetchBinlogs 中提取出来,实现 BINLOG 同步成功后逐一保存 binlogHistory
binlogFiles, err := dbProgram.FetchBinlogs(ctx, downloadLatestBinlogFile, earliestBackupSequence, binlogHistory)
if err != nil { if err != nil {
return err return err
} }
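singleFlightFetchBinlog keys the binlog download on the instance id, so concurrent restores share one in-flight fetch instead of racing, and the loop retries when the shared result was produced for an earlier target time. A self-contained sketch of the underlying singleflight.DoChan pattern (the fetch function is a stand-in):

package main

import (
    "fmt"
    "strconv"
    "sync"
    "time"

    "golang.org/x/sync/singleflight"
)

var group singleflight.Group

// fetchBinlog stands in for the real download; all callers for the same
// instance share a single in-flight execution.
func fetchBinlog(instanceId uint64) (time.Time, error) {
    time.Sleep(100 * time.Millisecond) // simulate the download
    return time.Now(), nil
}

func sharedFetch(instanceId uint64) (time.Time, error) {
    key := strconv.FormatUint(instanceId, 10)
    ch := group.DoChan(key, func() (any, error) {
        return fetchBinlog(instanceId)
    })
    res := <-ch
    if res.Err != nil {
        return time.Time{}, res.Err
    }
    return res.Val.(time.Time), nil
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            t, _ := sharedFetch(42)
            fmt.Println(t.UnixNano()) // identical values: the call was shared
        }()
    }
    wg.Wait()
}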


@@ -13,7 +13,7 @@ type DbProgram interface {
Backup(ctx context.Context, backupHistory *entity.DbBackupHistory) (*entity.BinlogInfo, error) Backup(ctx context.Context, backupHistory *entity.DbBackupHistory) (*entity.BinlogInfo, error)
FetchBinlogs(ctx context.Context, downloadLatestBinlogFile bool, earliestBackupSequence, latestBinlogSequence int64) ([]*entity.BinlogFile, error) FetchBinlogs(ctx context.Context, downloadLatestBinlogFile bool, earliestBackupSequence int64, latestBinlogHistory *entity.DbBinlogHistory) ([]*entity.BinlogFile, error)
ReplayBinlog(ctx context.Context, originalDatabase, targetDatabase string, restoreInfo *RestoreInfo) error ReplayBinlog(ctx context.Context, originalDatabase, targetDatabase string, restoreInfo *RestoreInfo) error
@@ -22,6 +22,8 @@ type DbProgram interface {
RemoveBackupHistory(ctx context.Context, dbBackupId uint64, dbBackupHistoryUuid string) error RemoveBackupHistory(ctx context.Context, dbBackupId uint64, dbBackupHistoryUuid string) error
GetBinlogEventPositionAtOrAfterTime(ctx context.Context, binlogName string, targetTime time.Time) (position int64, parseErr error) GetBinlogEventPositionAtOrAfterTime(ctx context.Context, binlogName string, targetTime time.Time) (position int64, parseErr error)
PruneBinlog(history *entity.DbBinlogHistory) error
} }
type RestoreInfo struct { type RestoreInfo struct {


@@ -141,3 +141,12 @@ func (dbType DbType) StmtUseDatabase(dbName string) string {
return "" return ""
} }
} }
func (dbType DbType) SupportingBackup() bool {
switch dbType {
case DbTypeMysql, DbTypeMariadb:
return true
default:
return false
}
}
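Together with the dialect changes below, SupportingBackup lets callers fail fast instead of hitting the old panic("implement me"): GetDbProgram now returns an error for unsupported dialects. A self-contained sketch of that contract; the DbType constant values here are placeholders:

package main

import "fmt"

type DbType string

const (
    DbTypeMysql   DbType = "mysql"
    DbTypeMariadb DbType = "mariadb"
)

// Mirrors dbi.DbType.SupportingBackup from this commit.
func (t DbType) SupportingBackup() bool {
    switch t {
    case DbTypeMysql, DbTypeMariadb:
        return true
    default:
        return false
    }
}

// getProgram mimics the new GetDbProgram contract: an error instead of a
// panic for dialects without backup support.
func getProgram(t DbType) (string, error) {
    if !t.SupportingBackup() {
        return "", fmt.Errorf("该数据库类型不支持数据库备份与恢复: %v", t)
    }
    return "mysql-program", nil
}

func main() {
    fmt.Println(getProgram(DbTypeMysql))
    fmt.Println(getProgram(DbType("oracle")))
}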


@@ -108,7 +108,7 @@ type Dialect interface {
GetSchemas() ([]string, error) GetSchemas() ([]string, error)
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
GetDbProgram() DbProgram GetDbProgram() (DbProgram, error)
// 批量保存数据 // 批量保存数据
BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error)


@@ -248,8 +248,8 @@ func (dd *DMDialect) GetSchemas() ([]string, error) {
} }
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
func (dd *DMDialect) GetDbProgram() dbi.DbProgram { func (dd *DMDialect) GetDbProgram() (dbi.DbProgram, error) {
panic("implement me") return nil, fmt.Errorf("该数据库类型不支持数据库备份与恢复: %v", dd.dc.Info.Type)
} }
var ( var (


@@ -285,8 +285,8 @@ func (md *MssqlDialect) GetSchemas() ([]string, error) {
} }
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
func (md *MssqlDialect) GetDbProgram() dbi.DbProgram { func (md *MssqlDialect) GetDbProgram() (dbi.DbProgram, error) {
panic("implement me") return nil, fmt.Errorf("该数据库类型不支持数据库备份与恢复: %v", md.dc.Info.Type)
} }
func (md *MssqlDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) { func (md *MssqlDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) {


@@ -169,8 +169,8 @@ func (md *MysqlDialect) GetSchemas() ([]string, error) {
} }
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
func (md *MysqlDialect) GetDbProgram() dbi.DbProgram { func (md *MysqlDialect) GetDbProgram() (dbi.DbProgram, error) {
return NewDbProgramMysql(md.dc) return NewDbProgramMysql(md.dc), nil
} }
func (md *MysqlDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) { func (md *MysqlDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) {


@@ -2,6 +2,7 @@ package mysql
import ( import (
"bufio" "bufio"
"compress/gzip"
"context" "context"
"database/sql" "database/sql"
"fmt" "fmt"
@@ -130,22 +131,46 @@ func (svc *DbProgramMysql) Backup(ctx context.Context, backupHistory *entity.DbB
if binlogEnabled && rowFormatEnabled { if binlogEnabled && rowFormatEnabled {
binlogInfo, err = readBinlogInfoFromBackup(reader) binlogInfo, err = readBinlogInfoFromBackup(reader)
} }
_ = reader.Close()
if err != nil { if err != nil {
_ = reader.Close()
return nil, errors.Wrapf(err, "从备份文件中读取 binlog 信息失败") return nil, errors.Wrapf(err, "从备份文件中读取 binlog 信息失败")
} }
fileName := filepath.Join(dir, fmt.Sprintf("%s.sql", backupHistory.Uuid))
if err := os.Rename(tmpFile, fileName); err != nil {
return nil, errors.Wrap(err, "备份文件改名失败")
}
if _, err := reader.Seek(0, io.SeekStart); err != nil {
_ = reader.Close()
return nil, errors.Wrapf(err, "跳转到备份文件开始处失败")
}
gzipTmpFile := tmpFile + ".gz"
writer, err := os.Create(gzipTmpFile)
if err != nil {
_ = reader.Close()
return nil, errors.Wrapf(err, "创建备份压缩文件失败")
}
defer func() {
_ = os.Remove(gzipTmpFile)
}()
gzipWriter := gzip.NewWriter(writer)
gzipWriter.Name = backupHistory.Uuid + ".sql"
_, err = io.Copy(gzipWriter, reader)
_ = gzipWriter.Close()
_ = writer.Close()
_ = reader.Close()
if err != nil {
return nil, errors.Wrapf(err, "压缩备份文件失败")
}
destPath := filepath.Join(dir, backupHistory.Uuid+".sql")
if err := os.Rename(gzipTmpFile, destPath+".gz"); err != nil {
return nil, errors.Wrap(err, "备份文件更名失败")
}
return binlogInfo, nil return binlogInfo, nil
} }
func (svc *DbProgramMysql) RemoveBackupHistory(_ context.Context, dbBackupId uint64, dbBackupHistoryUuid string) error { func (svc *DbProgramMysql) RemoveBackupHistory(_ context.Context, dbBackupId uint64, dbBackupHistoryUuid string) error {
fileName := filepath.Join(svc.getDbBackupDir(svc.dbInfo().InstanceId, dbBackupId), fileName := filepath.Join(svc.getDbBackupDir(svc.dbInfo().InstanceId, dbBackupId),
fmt.Sprintf("%v.sql", dbBackupHistoryUuid)) fmt.Sprintf("%v.sql", dbBackupHistoryUuid))
return os.Remove(fileName) _ = os.Remove(fileName)
_ = os.Remove(fileName + ".gz")
return nil
} }
func (svc *DbProgramMysql) RestoreBackupHistory(ctx context.Context, dbName string, dbBackupId uint64, dbBackupHistoryUuid string) error { func (svc *DbProgramMysql) RestoreBackupHistory(ctx context.Context, dbName string, dbBackupId uint64, dbBackupHistoryUuid string) error {
@@ -158,18 +183,33 @@ func (svc *DbProgramMysql) RestoreBackupHistory(ctx context.Context, dbName stri
"--password=" + dbInfo.Password, "--password=" + dbInfo.Password,
} }
compressed := false
fileName := filepath.Join(svc.getDbBackupDir(svc.dbInfo().InstanceId, dbBackupId), fileName := filepath.Join(svc.getDbBackupDir(svc.dbInfo().InstanceId, dbBackupId),
fmt.Sprintf("%v.sql", dbBackupHistoryUuid)) fmt.Sprintf("%v.sql", dbBackupHistoryUuid))
_, err := os.Stat(fileName)
if err != nil {
compressed = true
fileName += ".gz"
}
file, err := os.Open(fileName) file, err := os.Open(fileName)
if err != nil { if err != nil {
return errors.Wrap(err, "打开备份文件失败") return errors.Wrap(err, "打开备份文件失败")
} }
defer func() { defer func() { _ = file.Close() }()
_ = file.Close()
}() var reader io.ReadCloser
if compressed {
reader, err = gzip.NewReader(file)
if err != nil {
return errors.Wrap(err, "解压缩备份文件失败")
}
defer func() { _ = reader.Close() }()
} else {
reader = file
}
cmd := exec.CommandContext(ctx, svc.getMysqlBin().MysqlPath, args...) cmd := exec.CommandContext(ctx, svc.getMysqlBin().MysqlPath, args...)
cmd.Stdin = file cmd.Stdin = reader
logx.Debug("恢复数据库: ", cmd.String()) logx.Debug("恢复数据库: ", cmd.String())
if err := runCmd(cmd); err != nil { if err := runCmd(cmd); err != nil {
logx.Errorf("运行 mysql 程序失败: %v", err) logx.Errorf("运行 mysql 程序失败: %v", err)
@@ -205,13 +245,17 @@ func (svc *DbProgramMysql) downloadBinlogFilesOnServer(ctx context.Context, binl
} }
// Parse the first binlog eventTs of a local binlog file. // Parse the first binlog eventTs of a local binlog file.
func (svc *DbProgramMysql) parseLocalBinlogLastEventTime(ctx context.Context, filePath string) (eventTime time.Time, parseErr error) { func (svc *DbProgramMysql) parseLocalBinlogLastEventTime(ctx context.Context, filePath string, lastEventTime time.Time) (eventTime time.Time, parseErr error) {
// todo: implement me return svc.parseLocalBinlogEventTime(ctx, filePath, false, lastEventTime)
return time.Now(), nil
} }
// Parse the first binlog eventTs of a local binlog file. // Parse the first binlog eventTs of a local binlog file.
func (svc *DbProgramMysql) parseLocalBinlogFirstEventTime(ctx context.Context, filePath string) (eventTime time.Time, parseErr error) { func (svc *DbProgramMysql) parseLocalBinlogFirstEventTime(ctx context.Context, filePath string) (eventTime time.Time, parseErr error) {
return svc.parseLocalBinlogEventTime(ctx, filePath, true, time.Time{})
}
// Parse the first binlog eventTs of a local binlog file.
func (svc *DbProgramMysql) parseLocalBinlogEventTime(ctx context.Context, filePath string, firstOrLast bool, startTime time.Time) (eventTime time.Time, parseErr error) {
args := []string{ args := []string{
// Local binlog file path. // Local binlog file path.
filePath, filePath,
@@ -220,6 +264,9 @@ func (svc *DbProgramMysql) parseLocalBinlogFirstEventTime(ctx context.Context, f
// Tell mysqlbinlog to suppress the BINLOG statements for row events, which reduces the unneeded output. // Tell mysqlbinlog to suppress the BINLOG statements for row events, which reduces the unneeded output.
"--base64-output=DECODE-ROWS", "--base64-output=DECODE-ROWS",
} }
if !startTime.IsZero() {
args = append(args, "--start-datetime", startTime.Local().Format(time.DateTime))
}
cmd := exec.CommandContext(ctx, svc.getMysqlBin().MysqlbinlogPath, args...) cmd := exec.CommandContext(ctx, svc.getMysqlBin().MysqlbinlogPath, args...)
var stderr strings.Builder var stderr strings.Builder
cmd.Stderr = &stderr cmd.Stderr = &stderr
@@ -237,22 +284,30 @@ func (svc *DbProgramMysql) parseLocalBinlogFirstEventTime(ctx context.Context, f
parseErr = errors.Wrap(parseErr, stderr.String()) parseErr = errors.Wrap(parseErr, stderr.String())
} }
}() }()
lastEventTime := time.Time{}
for s := bufio.NewScanner(pr); s.Scan(); { for s := bufio.NewScanner(pr); s.Scan(); {
line := s.Text() line := s.Text()
eventTimeParsed, found, err := parseBinlogEventTimeInLine(line) eventTimeParsed, found, err := parseBinlogEventTimeInLine(line)
if err != nil { if err != nil {
return time.Time{}, errors.Wrap(err, "解析 binlog 文件失败") return time.Time{}, errors.Wrap(err, "解析 binlog 文件失败")
} }
if found { if !found {
return eventTimeParsed, nil continue
} }
if !firstOrLast {
lastEventTime = eventTimeParsed
continue
}
return eventTimeParsed, nil
} }
return time.Time{}, errors.New("解析 binlog 文件失败") if lastEventTime.IsZero() {
return time.Time{}, errors.New("解析 binlog 文件失败")
}
return lastEventTime, nil
} }
// FetchBinlogs downloads binlog files from startingFileName on server to `binlogDir`. // FetchBinlogs downloads binlog files from startingFileName on server to `binlogDir`.
func (svc *DbProgramMysql) FetchBinlogs(ctx context.Context, downloadLatestBinlogFile bool, earliestBackupSequence, latestBinlogSequence int64) ([]*entity.BinlogFile, error) { func (svc *DbProgramMysql) FetchBinlogs(ctx context.Context, downloadLatestBinlogFile bool, earliestBackupSequence int64, latestBinlogHistory *entity.DbBinlogHistory) ([]*entity.BinlogFile, error) {
// Read binlog files list on server. // Read binlog files list on server.
binlogFilesOnServerSorted, err := svc.GetSortedBinlogFilesOnServer(ctx) binlogFilesOnServerSorted, err := svc.GetSortedBinlogFilesOnServer(ctx)
if err != nil { if err != nil {
@@ -264,8 +319,11 @@ func (svc *DbProgramMysql) FetchBinlogs(ctx context.Context, downloadLatestBinlo
} }
indexHistory := -1 indexHistory := -1
for i, file := range binlogFilesOnServerSorted { for i, file := range binlogFilesOnServerSorted {
if latestBinlogSequence == file.Sequence { if latestBinlogHistory.Sequence == file.Sequence {
indexHistory = i + 1 indexHistory = i + 1
file.FirstEventTime = latestBinlogHistory.FirstEventTime
file.LastEventTime = latestBinlogHistory.LastEventTime
file.LocalSize = latestBinlogHistory.FileSize
break break
} }
if earliestBackupSequence == file.Sequence { if earliestBackupSequence == file.Sequence {
@@ -274,10 +332,15 @@ func (svc *DbProgramMysql) FetchBinlogs(ctx context.Context, downloadLatestBinlo
} }
} }
if indexHistory < 0 { if indexHistory < 0 {
return nil, errors.New(fmt.Sprintf("在数据库服务器上未找到 binlog 文件: %d, %d", earliestBackupSequence, latestBinlogSequence)) // todo: 数据库服务器上 binlog 序列已被删除, 导致 binlog 同步失败,如何处理?
return nil, errors.New(fmt.Sprintf("数据库服务器上的 binlog 序列已被删除: %d, %d", earliestBackupSequence, latestBinlogHistory.Sequence))
} }
if indexHistory > len(binlogFilesOnServerSorted)-1 { if indexHistory >= len(binlogFilesOnServerSorted)-1 {
indexHistory = len(binlogFilesOnServerSorted) - 1 indexHistory = len(binlogFilesOnServerSorted) - 1
if binlogFilesOnServerSorted[indexHistory].LocalSize == binlogFilesOnServerSorted[indexHistory].RemoteSize {
// 没有新的事件,不需要重新下载
return nil, nil
}
} }
binlogFilesOnServerSorted = binlogFilesOnServerSorted[indexHistory:] binlogFilesOnServerSorted = binlogFilesOnServerSorted[indexHistory:]
@@ -331,13 +394,14 @@ func (svc *DbProgramMysql) downloadBinlogFile(ctx context.Context, binlogFileToD
logx.Error("未找到 binlog 文件", logx.String("path", binlogFilePathTemp), logx.String("error", err.Error())) logx.Error("未找到 binlog 文件", logx.String("path", binlogFilePathTemp), logx.String("error", err.Error()))
return errors.Wrapf(err, "未找到 binlog 文件: %q", binlogFilePathTemp) return errors.Wrapf(err, "未找到 binlog 文件: %q", binlogFilePathTemp)
} }
if !isLast && binlogFileTempInfo.Size() != binlogFileToDownload.Size {
if (isLast && binlogFileTempInfo.Size() < binlogFileToDownload.RemoteSize) || (!isLast && binlogFileTempInfo.Size() != binlogFileToDownload.RemoteSize) {
logx.Error("Downloaded archived binlog file size is not equal to size queried on the MySQL server earlier.", logx.Error("Downloaded archived binlog file size is not equal to size queried on the MySQL server earlier.",
logx.String("binlog", binlogFileToDownload.Name), logx.String("binlog", binlogFileToDownload.Name),
logx.Int64("sizeInfo", binlogFileToDownload.Size), logx.Int64("sizeInfo", binlogFileToDownload.RemoteSize),
logx.Int64("downloadedSize", binlogFileTempInfo.Size()), logx.Int64("downloadedSize", binlogFileTempInfo.Size()),
) )
return errors.Errorf("下载的 binlog 文件 %q 与服务上的文件大小不一致 %d != %d", binlogFilePathTemp, binlogFileTempInfo.Size(), binlogFileToDownload.Size) return errors.Errorf("下载的 binlog 文件 %q 与服务上的文件大小不一致 %d != %d", binlogFilePathTemp, binlogFileTempInfo.Size(), binlogFileToDownload.RemoteSize)
} }
binlogFilePath := svc.GetBinlogFilePath(binlogFileToDownload.Name) binlogFilePath := svc.GetBinlogFilePath(binlogFileToDownload.Name)
@@ -348,7 +412,7 @@ func (svc *DbProgramMysql) downloadBinlogFile(ctx context.Context, binlogFileToD
if err != nil { if err != nil {
return err return err
} }
lastEventTime, err := svc.parseLocalBinlogLastEventTime(ctx, binlogFilePath) lastEventTime, err := svc.parseLocalBinlogLastEventTime(ctx, binlogFilePath, binlogFileToDownload.LastEventTime)
if err != nil { if err != nil {
return err return err
} }
@@ -394,9 +458,9 @@ func (svc *DbProgramMysql) GetSortedBinlogFilesOnServer(_ context.Context) ([]*e
return nil, errors.Wrapf(err, "SQL 语句 %q 执行结果解析失败", query) return nil, errors.Wrapf(err, "SQL 语句 %q 执行结果解析失败", query)
} }
binlogFile := &entity.BinlogFile{ binlogFile := &entity.BinlogFile{
Name: name, Name: name,
Size: int64(size), RemoteSize: int64(size),
Sequence: seq, Sequence: seq,
} }
binlogFiles = append(binlogFiles, binlogFile) binlogFiles = append(binlogFiles, binlogFile)
} }
@@ -781,3 +845,9 @@ func (svc *DbProgramMysql) getDbBackupDir(instanceId, backupId uint64) string {
fmt.Sprintf("instance-%d", instanceId), fmt.Sprintf("instance-%d", instanceId),
fmt.Sprintf("backup-%d", backupId)) fmt.Sprintf("backup-%d", backupId))
} }
func (svc *DbProgramMysql) PruneBinlog(history *entity.DbBinlogHistory) error {
binlogFilePath := filepath.Join(svc.getBinlogDir(history.DbInstanceId), history.FileName)
_ = os.Remove(binlogFilePath)
return nil
}
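Backups are now stored as <uuid>.sql.gz, and RestoreBackupHistory falls back to the plain .sql file when no compressed copy exists. A stdlib-only sketch of the compress-then-read round trip used here (file names are illustrative):

package main

import (
    "compress/gzip"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"
)

func main() {
    dir := os.TempDir()
    dump := "CREATE TABLE t (id INT);\n" // stands in for the mysqldump output

    // Compress the dump the way Backup now does: a gzip writer over the
    // destination file, with the logical name recorded in the gzip header.
    gzPath := filepath.Join(dir, "demo.sql.gz")
    out, err := os.Create(gzPath)
    if err != nil {
        panic(err)
    }
    zw := gzip.NewWriter(out)
    zw.Name = "demo.sql"
    if _, err := io.Copy(zw, strings.NewReader(dump)); err != nil {
        panic(err)
    }
    _ = zw.Close()
    _ = out.Close()

    // Read it back the way RestoreBackupHistory does before piping to mysql.
    in, err := os.Open(gzPath)
    if err != nil {
        panic(err)
    }
    defer in.Close()
    zr, err := gzip.NewReader(in)
    if err != nil {
        panic(err)
    }
    defer zr.Close()
    restored, _ := io.ReadAll(zr)
    fmt.Print(string(restored))
}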


@@ -47,11 +47,11 @@ func (s *DbInstanceSuite) SetupSuite() {
Username: "test", Username: "test",
Password: "test", Password: "test",
} }
dbConn, err := dbInfo.Conn(GetMeta()) dbConn, err := dbInfo.Conn(dbi.GetMeta(dbi.DbTypeMysql))
s.Require().NoError(err) s.Require().NoError(err)
s.dbConn = dbConn s.dbConn = dbConn
s.repositories = &repository.Repositories{ s.repositories = &repository.Repositories{
Instance: persistence.GetInstanceRepo(), Instance: persistence.NewInstanceRepo(),
Backup: persistence.NewDbBackupRepo(), Backup: persistence.NewDbBackupRepo(),
BackupHistory: persistence.NewDbBackupHistoryRepo(), BackupHistory: persistence.NewDbBackupHistoryRepo(),
Restore: persistence.NewDbRestoreRepo(), Restore: persistence.NewDbRestoreRepo(),
@@ -111,7 +111,7 @@ func (s *DbInstanceSuite) testBackup(backupHistory *entity.DbBackupHistory) {
binlogInfo, err := s.instanceSvc.Backup(context.Background(), backupHistory) binlogInfo, err := s.instanceSvc.Backup(context.Background(), backupHistory)
require.NoError(err) require.NoError(err)
fileName := filepath.Join(s.instanceSvc.getDbBackupDir(s.dbConn.Info.InstanceId, backupHistory.Id), dbNameBackupTest+".sql") fileName := filepath.Join(s.instanceSvc.getDbBackupDir(s.dbConn.Info.InstanceId, backupHistory.Id), dbNameBackupTest+".sql.gz")
_, err = os.Stat(fileName) _, err = os.Stat(fileName)
require.NoError(err) require.NoError(err)


@@ -248,8 +248,8 @@ func (od *OracleDialect) GetSchemas() ([]string, error) {
} }
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
func (od *OracleDialect) GetDbProgram() dbi.DbProgram { func (od *OracleDialect) GetDbProgram() (dbi.DbProgram, error) {
panic("implement me") return nil, fmt.Errorf("该数据库类型不支持数据库备份与恢复: %v", od.dc.Info.Type)
} }
func (od *OracleDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) { func (od *OracleDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) {


@@ -188,8 +188,8 @@ func (md *PgsqlDialect) GetSchemas() ([]string, error) {
} }
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
func (md *PgsqlDialect) GetDbProgram() dbi.DbProgram { func (md *PgsqlDialect) GetDbProgram() (dbi.DbProgram, error) {
panic("implement me") return nil, fmt.Errorf("该数据库类型不支持数据库备份与恢复: %v", md.dc.Info.Type)
} }
func (md *PgsqlDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) { func (md *PgsqlDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) {


@@ -180,8 +180,8 @@ func (sd *SqliteDialect) GetSchemas() ([]string, error) {
} }
// GetDbProgram 获取数据库程序模块,用于数据库备份与恢复 // GetDbProgram 获取数据库程序模块,用于数据库备份与恢复
func (sd *SqliteDialect) GetDbProgram() dbi.DbProgram { func (sd *SqliteDialect) GetDbProgram() (dbi.DbProgram, error) {
panic("implement me") return nil, fmt.Errorf("该数据库类型不支持数据库备份与恢复: %v", sd.dc.Info.Type)
} }
func (sd *SqliteDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) { func (sd *SqliteDialect) BatchInsert(tx *sql.Tx, tableName string, columns []string, values [][]any) (int64, error) {


@@ -18,6 +18,7 @@ type DbBackup struct {
EnabledDesc string // 启用状态描述 EnabledDesc string // 启用状态描述
StartTime time.Time // 开始时间 StartTime time.Time // 开始时间
Interval time.Duration // 间隔时间 Interval time.Duration // 间隔时间
MaxSaveDays int // 数据库备份历史保留天数,过期将自动删除
Repeated bool // 是否重复执行 Repeated bool // 是否重复执行
} }
@@ -81,10 +82,6 @@ func (b *DbBackup) GetInterval() time.Duration {
return b.Interval return b.Interval
} }
func (b *DbBackup) SetLastStatus(status DbJobStatus, err error) {
b.setLastStatus(b.GetJobType(), status, err)
}
func (b *DbBackup) GetKey() DbJobKey { func (b *DbBackup) GetKey() DbJobKey {
return b.getKey(b.GetJobType()) return b.getKey(b.GetJobType())
} }

View File

@@ -11,14 +11,16 @@ const (
// BinlogFile is the metadata of the MySQL binlog file. // BinlogFile is the metadata of the MySQL binlog file.
type BinlogFile struct { type BinlogFile struct {
Name string Name string
Size int64 RemoteSize int64
LocalSize int64
// Sequence is parsed from Name and is for the sorting purpose. // Sequence is parsed from Name and is for the sorting purpose.
Sequence int64 Sequence int64
FirstEventTime time.Time FirstEventTime time.Time
LastEventTime time.Time LastEventTime time.Time
Downloaded bool
Downloaded bool
} }
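Splitting Size into RemoteSize and LocalSize lets a fetcher distinguish a missing or truncated local copy from a complete one. A minimal sketch over the struct above; the helper name is hypothetical.
func needsDownload(f *BinlogFile) bool {
	// re-download when the file was never fetched or the local copy is shorter than the server's
	return !f.Downloaded || f.LocalSize < f.RemoteSize
}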
var _ DbJob = (*DbBinlog)(nil) var _ DbJob = (*DbBinlog)(nil)
@@ -76,10 +78,6 @@ func (b *DbBinlog) GetJobType() DbJobType {
return DbJobTypeBinlog return DbJobTypeBinlog
} }
func (b *DbBinlog) SetLastStatus(status DbJobStatus, err error) {
b.setLastStatus(b.GetJobType(), status, err)
}
func (b *DbBinlog) GetKey() DbJobKey { func (b *DbBinlog) GetKey() DbJobKey {
return b.getKey(b.GetJobType()) return b.getKey(b.GetJobType())
} }

View File

@@ -14,6 +14,7 @@ type DbBinlogHistory struct {
FileSize int64 FileSize int64
Sequence int64 Sequence int64
FirstEventTime time.Time FirstEventTime time.Time
LastEventTime time.Time
DbInstanceId uint64 `json:"dbInstanceId"` DbInstanceId uint64 `json:"dbInstanceId"`
} }

View File

@@ -45,7 +45,6 @@ var _ runner.Job = (DbJob)(nil)
type DbJobBase interface { type DbJobBase interface {
model.ModelI model.ModelI
GetLastStatus() DbJobStatus
} }
type DbJob interface { type DbJob interface {
@@ -62,7 +61,6 @@ type DbJob interface {
SetEnabled(enabled bool, desc string) SetEnabled(enabled bool, desc string)
Update(job runner.Job) Update(job runner.Job)
GetInterval() time.Duration GetInterval() time.Duration
SetLastStatus(status DbJobStatus, err error)
} }
var _ DbJobBase = (*DbJobBaseImpl)(nil) var _ DbJobBase = (*DbJobBaseImpl)(nil)
@@ -84,10 +82,6 @@ func (d *DbJobBaseImpl) getJobType() DbJobType {
return job.GetJobType() return job.GetJobType()
} }
func (d *DbJobBaseImpl) GetLastStatus() DbJobStatus {
return d.LastStatus
}
func (d *DbJobBaseImpl) setLastStatus(jobType DbJobType, status DbJobStatus, err error) { func (d *DbJobBaseImpl) setLastStatus(jobType DbJobType, status DbJobStatus, err error) {
var statusName, jobName string var statusName, jobName string
switch status { switch status {

View File

@@ -79,10 +79,6 @@ func (r *DbRestore) GetJobType() DbJobType {
return DbJobTypeRestore return DbJobTypeRestore
} }
func (r *DbRestore) SetLastStatus(status DbJobStatus, err error) {
r.setLastStatus(r.GetJobType(), status, err)
}
func (r *DbRestore) GetKey() DbJobKey { func (r *DbRestore) GetKey() DbJobKey {
return r.getKey(r.GetJobType()) return r.getKey(r.GetJobType())
} }

View File

@@ -6,7 +6,7 @@ import (
) )
type DbBackup interface { type DbBackup interface {
DbJob DbJob[*entity.DbBackup]
ListToDo(jobs any) error ListToDo(jobs any) error
ListDbInstances(enabled bool, repeated bool, instanceIds *[]uint64) error ListDbInstances(enabled bool, repeated bool, instanceIds *[]uint64) error
@@ -14,4 +14,6 @@ type DbBackup interface {
// GetPageList 分页获取数据库任务列表 // GetPageList 分页获取数据库任务列表
GetPageList(condition *entity.DbBackupQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error) GetPageList(condition *entity.DbBackupQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error)
ListByCond(cond any, listModels any, cols ...string) error
} }

View File

@@ -12,12 +12,13 @@ type DbBackupHistory interface {
// GetPageList 分页获取数据备份历史 // GetPageList 分页获取数据备份历史
GetPageList(condition *entity.DbBackupHistoryQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error) GetPageList(condition *entity.DbBackupHistoryQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error)
GetLatestHistory(instanceId uint64, dbName string, bi *entity.BinlogInfo) (*entity.DbBackupHistory, error) GetLatestHistoryForBinlog(instanceId uint64, dbName string, bi *entity.BinlogInfo) (*entity.DbBackupHistory, error)
GetEarliestHistory(instanceId uint64) (*entity.DbBackupHistory, bool, error) GetEarliestHistoryForBinlog(instanceId uint64) (*entity.DbBackupHistory, bool, error)
GetHistories(backupHistoryIds []uint64, toEntity any) error GetHistories(backupHistoryIds []uint64, toEntity any) error
UpdateDeleting(deleting bool, backupHistoryId ...uint64) (bool, error) UpdateDeleting(deleting bool, backupHistoryId ...uint64) (bool, error)
UpdateRestoring(restoring bool, backupHistoryId ...uint64) (bool, error) UpdateRestoring(restoring bool, backupHistoryId ...uint64) (bool, error)
ZeroBinlogInfo(backupHistoryId uint64) error
} }

View File

@@ -6,7 +6,7 @@ import (
) )
type DbBinlog interface { type DbBinlog interface {
DbJob DbJob[*entity.DbBinlog]
AddJobIfNotExists(ctx context.Context, job *entity.DbBinlog) error AddJobIfNotExists(ctx context.Context, job *entity.DbBinlog) error
} }

View File

@@ -19,4 +19,6 @@ type DbBinlogHistory interface {
InsertWithBinlogFiles(ctx context.Context, instanceId uint64, binlogFiles []*entity.BinlogFile) error InsertWithBinlogFiles(ctx context.Context, instanceId uint64, binlogFiles []*entity.BinlogFile) error
Upsert(ctx context.Context, history *entity.DbBinlogHistory) error Upsert(ctx context.Context, history *entity.DbBinlogHistory) error
GetHistoriesBeforeSequence(ctx context.Context, instanceId uint64, binlogSeq int64, histories *[]*entity.DbBinlogHistory) error
} }

View File

@@ -3,24 +3,18 @@ package repository
import ( import (
"context" "context"
"mayfly-go/internal/db/domain/entity" "mayfly-go/internal/db/domain/entity"
"mayfly-go/pkg/base"
) )
type DbJobBase interface { type DbJobBase[T entity.DbJob] interface {
// GetById 根据实体id查询 base.Repo[T]
GetById(e entity.DbJob, id uint64, cols ...string) error
// UpdateById 根据实体id更新实体信息
UpdateById(ctx context.Context, e entity.DbJob, columns ...string) error
// DeleteById 根据实体主键删除实体
DeleteById(ctx context.Context, id uint64) error
// UpdateLastStatus 更新任务执行状态 // UpdateLastStatus 更新任务执行状态
UpdateLastStatus(ctx context.Context, job entity.DbJob) error UpdateLastStatus(ctx context.Context, job entity.DbJob) error
} }
type DbJob interface { type DbJob[T entity.DbJob] interface {
DbJobBase DbJobBase[T]
// AddJob 添加数据库任务 // AddJob 添加数据库任务
AddJob(ctx context.Context, jobs any) error AddJob(ctx context.Context, jobs any) error
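Parameterizing DbJobBase over the concrete entity means the embedded base.Repo[T] supplies typed CRUD, so the old any-typed wrappers and entity.DbJob type assertions disappear. A hedged usage sketch, assuming base.Repo keeps the GetById(entity, id, cols...) shape the removed wrappers forwarded to.
func loadBackupJob(repo repository.DbBackup, jobId uint64) (*entity.DbBackup, error) {
	job := new(entity.DbBackup)
	// typed lookup via the embedded base.Repo[*entity.DbBackup]
	if err := repo.GetById(job, jobId); err != nil {
		return nil, err
	}
	return job, nil
}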

View File

@@ -6,7 +6,7 @@ import (
) )
type DbRestore interface { type DbRestore interface {
DbJob DbJob[*entity.DbRestore]
ListToDo(jobs any) error ListToDo(jobs any) error
GetDbNamesWithoutRestore(instanceId uint64, dbNames []string) ([]string, error) GetDbNamesWithoutRestore(instanceId uint64, dbNames []string) ([]string, error)

View File

@@ -64,7 +64,6 @@ func (d *dbBackupRepoImpl) ListToDo(jobs any) error {
// GetPageList 分页获取数据库备份任务列表 // GetPageList 分页获取数据库备份任务列表
func (d *dbBackupRepoImpl) GetPageList(condition *entity.DbBackupQuery, pageParam *model.PageParam, toEntity any, _ ...string) (*model.PageResult[any], error) { func (d *dbBackupRepoImpl) GetPageList(condition *entity.DbBackupQuery, pageParam *model.PageParam, toEntity any, _ ...string) (*model.PageResult[any], error) {
d.GetModel()
qd := gormx.NewQuery(d.GetModel()). qd := gormx.NewQuery(d.GetModel()).
Eq("id", condition.Id). Eq("id", condition.Id).
Eq0("db_instance_id", condition.DbInstanceId). Eq0("db_instance_id", condition.DbInstanceId).
@@ -83,12 +82,16 @@ func (d *dbBackupRepoImpl) UpdateEnabled(_ context.Context, jobId uint64, enable
cond := map[string]any{ cond := map[string]any{
"id": jobId, "id": jobId,
} }
desc := "任务已禁用" desc := "已禁用"
if enabled { if enabled {
desc = "任务已启用" desc = "已启用"
} }
return d.Updates(cond, map[string]any{ return d.Updates(cond, map[string]any{
"enabled": enabled, "enabled": enabled,
"enabled_desc": desc, "enabled_desc": desc,
}) })
} }
func (d *dbBackupRepoImpl) ListByCond(cond any, listModels any, cols ...string) error {
return d.dbJobBaseImpl.ListByCond(cond, listModels, cols...)
}
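ListByCond is a thin delegation to the embedded base repo. A hedged usage sketch for the retention sweep: load an instance's backup jobs and keep only those with a bounded history; the map-style condition mirrors the maps used elsewhere in this file, but the exact cond shape accepted by base.RepoImpl is an assumption.
func backupJobsNeedingCleanup(backupRepo repository.DbBackup, instanceId uint64) ([]*entity.DbBackup, error) {
	var jobs []*entity.DbBackup
	if err := backupRepo.ListByCond(map[string]any{"db_instance_id": instanceId}, &jobs); err != nil {
		return nil, err
	}
	expiring := jobs[:0]
	for _, job := range jobs {
		if job.MaxSaveDays > 0 { // 0 means histories are kept forever
			expiring = append(expiring, job)
		}
	}
	return expiring, nil
}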

View File

@@ -34,12 +34,13 @@ func (repo *dbBackupHistoryRepoImpl) GetPageList(condition *entity.DbBackupHisto
func (repo *dbBackupHistoryRepoImpl) GetHistories(backupHistoryIds []uint64, toEntity any) error { func (repo *dbBackupHistoryRepoImpl) GetHistories(backupHistoryIds []uint64, toEntity any) error {
return global.Db.Model(repo.GetModel()). return global.Db.Model(repo.GetModel()).
Where("id in ?", backupHistoryIds). Where("id in ?", backupHistoryIds).
Where("deleting = false").
Scopes(gormx.UndeleteScope). Scopes(gormx.UndeleteScope).
Find(toEntity). Find(toEntity).
Error Error
} }
func (repo *dbBackupHistoryRepoImpl) GetLatestHistory(instanceId uint64, dbName string, bi *entity.BinlogInfo) (*entity.DbBackupHistory, error) { func (repo *dbBackupHistoryRepoImpl) GetLatestHistoryForBinlog(instanceId uint64, dbName string, bi *entity.BinlogInfo) (*entity.DbBackupHistory, error) {
history := &entity.DbBackupHistory{} history := &entity.DbBackupHistory{}
db := global.Db db := global.Db
err := db.Model(repo.GetModel()). err := db.Model(repo.GetModel()).
@@ -48,6 +49,8 @@ func (repo *dbBackupHistoryRepoImpl) GetLatestHistory(instanceId uint64, dbName
Where(db.Where("binlog_sequence < ?", bi.Sequence). Where(db.Where("binlog_sequence < ?", bi.Sequence).
Or(db.Where("binlog_sequence = ?", bi.Sequence). Or(db.Where("binlog_sequence = ?", bi.Sequence).
Where("binlog_position <= ?", bi.Position))). Where("binlog_position <= ?", bi.Position))).
Where("binlog_sequence > 0").
Where("deleting = false").
Scopes(gormx.UndeleteScope). Scopes(gormx.UndeleteScope).
Order("binlog_sequence desc, binlog_position desc"). Order("binlog_sequence desc, binlog_position desc").
First(history).Error First(history).Error
@@ -57,10 +60,12 @@ func (repo *dbBackupHistoryRepoImpl) GetLatestHistory(instanceId uint64, dbName
return history, err return history, err
} }
func (repo *dbBackupHistoryRepoImpl) GetEarliestHistory(instanceId uint64) (*entity.DbBackupHistory, bool, error) { func (repo *dbBackupHistoryRepoImpl) GetEarliestHistoryForBinlog(instanceId uint64) (*entity.DbBackupHistory, bool, error) {
history := &entity.DbBackupHistory{} history := &entity.DbBackupHistory{}
db := global.Db.Model(repo.GetModel()) db := global.Db.Model(repo.GetModel())
err := db.Where("db_instance_id = ?", instanceId). err := db.Where("db_instance_id = ?", instanceId).
Where("binlog_sequence > 0").
Where("deleting = false").
Scopes(gormx.UndeleteScope). Scopes(gormx.UndeleteScope).
Order("binlog_sequence"). Order("binlog_sequence").
First(history).Error First(history).Error
@@ -79,7 +84,7 @@ func (repo *dbBackupHistoryRepoImpl) UpdateDeleting(deleting bool, backupHistory
Where("id in ?", backupHistoryId). Where("id in ?", backupHistoryId).
Where("restoring = false"). Where("restoring = false").
Scopes(gormx.UndeleteScope). Scopes(gormx.UndeleteScope).
Update("restoring", deleting) Update("deleting", deleting)
if db.Error != nil { if db.Error != nil {
return false, db.Error return false, db.Error
} }
@@ -103,3 +108,15 @@ func (repo *dbBackupHistoryRepoImpl) UpdateRestoring(restoring bool, backupHisto
} }
return true, nil return true, nil
} }
func (repo *dbBackupHistoryRepoImpl) ZeroBinlogInfo(backupHistoryId uint64) error {
return global.Db.Model(repo.GetModel()).
Where("id = ?", backupHistoryId).
Where("restoring = false").
Scopes(gormx.UndeleteScope).
Updates(&map[string]any{
"binlog_file_name": "",
"binlog_sequence": 0,
"binlog_position": 0,
}).Error
}
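A hedged sketch of the deletion flow these helpers support: mark the history as deleting (the Update above now correctly writes the deleting column rather than restoring), then clear its binlog coordinates so it is no longer considered a base point for binlog restores. The wrapper function itself is illustrative.
// assumed imports: "fmt" and the project's repository package
func markHistoryDeleting(repo repository.DbBackupHistory, historyId uint64) error {
	ok, err := repo.UpdateDeleting(true, historyId)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("backup history %d is being restored, skip deletion", historyId)
	}
	// drop the binlog file name/sequence/position so restores no longer start from it
	return repo.ZeroBinlogInfo(historyId)
}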

View File

@@ -7,6 +7,7 @@ import (
"mayfly-go/internal/db/domain/entity" "mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository" "mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/base" "mayfly-go/pkg/base"
"mayfly-go/pkg/global"
"mayfly-go/pkg/gormx" "mayfly-go/pkg/gormx"
"time" "time"
) )
@@ -82,7 +83,7 @@ func (repo *dbBinlogHistoryRepoImpl) Upsert(_ context.Context, history *entity.D
First(old).Error First(old).Error
switch { switch {
case err == nil: case err == nil:
return db.Model(old).Select("create_time", "file_size", "first_event_time").Updates(history).Error return db.Model(old).Select("create_time", "file_size", "first_event_time", "last_event_time").Updates(history).Error
case errors.Is(err, gorm.ErrRecordNotFound): case errors.Is(err, gorm.ErrRecordNotFound):
return db.Create(history).Error return db.Create(history).Error
default: default:
@@ -103,9 +104,10 @@ func (repo *dbBinlogHistoryRepoImpl) InsertWithBinlogFiles(ctx context.Context,
history := &entity.DbBinlogHistory{ history := &entity.DbBinlogHistory{
CreateTime: time.Now(), CreateTime: time.Now(),
FileName: fileOnServer.Name, FileName: fileOnServer.Name,
FileSize: fileOnServer.Size, FileSize: fileOnServer.RemoteSize,
Sequence: fileOnServer.Sequence, Sequence: fileOnServer.Sequence,
FirstEventTime: fileOnServer.FirstEventTime, FirstEventTime: fileOnServer.FirstEventTime,
LastEventTime: fileOnServer.LastEventTime,
DbInstanceId: instanceId, DbInstanceId: instanceId,
} }
histories = append(histories, history) histories = append(histories, history)
@@ -122,3 +124,13 @@ func (repo *dbBinlogHistoryRepoImpl) InsertWithBinlogFiles(ctx context.Context,
} }
return nil return nil
} }
func (repo *dbBinlogHistoryRepoImpl) GetHistoriesBeforeSequence(ctx context.Context, instanceId uint64, binlogSeq int64, histories *[]*entity.DbBinlogHistory) error {
return global.Db.Model(repo.GetModel()).
Where("db_instance_id = ?", instanceId).
Where("sequence < ?", binlogSeq).
Scopes(gormx.UndeleteScope).
Order("id").
Find(histories).
Error
}
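A hedged sketch of the pruning this query enables: find the earliest backup that can still serve as a binlog restore base, then list the binlog histories strictly before its sequence as purge candidates. Method names come from the repository interfaces above; the BinlogSequence field name is inferred from the binlog_sequence column and the surrounding purge logic is illustrative only.
func binlogHistoriesToPurge(ctx context.Context, backups repository.DbBackupHistory,
	binlogs repository.DbBinlogHistory, instanceId uint64) ([]*entity.DbBinlogHistory, error) {
	earliest, ok, err := backups.GetEarliestHistoryForBinlog(instanceId)
	if err != nil || !ok {
		return nil, err // no backup with binlog info yet: keep every binlog file
	}
	var histories []*entity.DbBinlogHistory
	if err := binlogs.GetHistoriesBeforeSequence(ctx, instanceId, earliest.BinlogSequence, &histories); err != nil {
		return nil, err
	}
	return histories, nil
}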

View File

@@ -12,20 +12,12 @@ import (
"reflect" "reflect"
) )
var _ repository.DbJobBase = (*dbJobBaseImpl[entity.DbJob])(nil) var _ repository.DbJobBase[entity.DbJob] = (*dbJobBaseImpl[entity.DbJob])(nil)
type dbJobBaseImpl[T entity.DbJob] struct { type dbJobBaseImpl[T entity.DbJob] struct {
base.RepoImpl[T] base.RepoImpl[T]
} }
func (d *dbJobBaseImpl[T]) GetById(e entity.DbJob, id uint64, cols ...string) error {
return d.RepoImpl.GetById(e.(T), id, cols...)
}
func (d *dbJobBaseImpl[T]) UpdateById(ctx context.Context, e entity.DbJob, columns ...string) error {
return d.RepoImpl.UpdateById(ctx, e.(T), columns...)
}
func (d *dbJobBaseImpl[T]) UpdateLastStatus(ctx context.Context, job entity.DbJob) error { func (d *dbJobBaseImpl[T]) UpdateLastStatus(ctx context.Context, job entity.DbJob) error {
return d.UpdateById(ctx, job.(T), "last_status", "last_result", "last_time") return d.UpdateById(ctx, job.(T), "last_status", "last_result", "last_time")
} }

View File

@@ -84,9 +84,9 @@ func (d *dbRestoreRepoImpl) UpdateEnabled(_ context.Context, jobId uint64, enabl
cond := map[string]any{ cond := map[string]any{
"id": jobId, "id": jobId,
} }
desc := "任务已禁用" desc := "已禁用"
if enabled { if enabled {
desc = "任务已启用" desc = "已启用"
} }
return d.Updates(cond, map[string]any{ return d.Updates(cond, map[string]any{
"enabled": enabled, "enabled": enabled,

View File

@@ -12,7 +12,7 @@ type instanceRepoImpl struct {
base.RepoImpl[*entity.DbInstance] base.RepoImpl[*entity.DbInstance]
} }
func newInstanceRepo() repository.Instance { func NewInstanceRepo() repository.Instance {
return &instanceRepoImpl{base.RepoImpl[*entity.DbInstance]{M: new(entity.DbInstance)}} return &instanceRepoImpl{base.RepoImpl[*entity.DbInstance]{M: new(entity.DbInstance)}}
} }

View File

@@ -5,7 +5,7 @@ import (
) )
func Init() { func Init() {
ioc.Register(newInstanceRepo(), ioc.WithComponentName("DbInstanceRepo")) ioc.Register(NewInstanceRepo(), ioc.WithComponentName("DbInstanceRepo"))
ioc.Register(newDbRepo(), ioc.WithComponentName("DbRepo")) ioc.Register(newDbRepo(), ioc.WithComponentName("DbRepo"))
ioc.Register(newDbSqlRepo(), ioc.WithComponentName("DbSqlRepo")) ioc.Register(newDbSqlRepo(), ioc.WithComponentName("DbSqlRepo"))
ioc.Register(newDbSqlExecRepo(), ioc.WithComponentName("DbSqlExecRepo")) ioc.Register(newDbSqlExecRepo(), ioc.WithComponentName("DbSqlExecRepo"))

View File

@@ -24,6 +24,12 @@ func ErrIsNil(err error, msgAndParams ...any) {
} }
} }
func ErrNotNil(err error, msg string, params ...any) {
if err == nil {
panic(errorx.NewBiz(fmt.Sprintf(msg, params...)))
}
}
func ErrIsNilAppendErr(err error, msg string) { func ErrIsNilAppendErr(err error, msg string) {
if err != nil { if err != nil {
panic(errorx.NewBiz(fmt.Sprintf(msg, err.Error()))) panic(errorx.NewBiz(fmt.Sprintf(msg, err.Error())))
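ErrNotNil mirrors ErrIsNil: it panics with a biz error when an error that was expected never occurred, which is useful for negative-path assertions. A hedged usage sketch, assuming these assertions live in the project's biz package alongside ErrIsNil; the wrapper is illustrative.
func mustFail(err error, what string) {
	// expected-failure assertion: the absence of an error is itself the bug
	biz.ErrNotNil(err, "expected %s to fail, but it succeeded", what)
}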

View File

@@ -22,7 +22,7 @@ var (
type JobKey = string type JobKey = string
type RunJobFunc[T Job] func(ctx context.Context, job T) error type RunJobFunc[T Job] func(ctx context.Context, job T) error
type NextJobFunc[T Job] func() (T, bool) type NextJobFunc[T Job] func() (T, bool)
type RunnableJobFunc[T Job] func(job T, next NextJobFunc[T]) (bool, error) type RunnableJobFunc[T Job] func(job T, nextRunning NextJobFunc[T]) (bool, error)
type ScheduleJobFunc[T Job] func(job T) (deadline time.Time, err error) type ScheduleJobFunc[T Job] func(job T) (deadline time.Time, err error)
type UpdateJobFunc[T Job] func(ctx context.Context, job T) error type UpdateJobFunc[T Job] func(ctx context.Context, job T) error
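Renaming the second parameter to nextRunning makes its role explicit: it iterates the jobs currently running. A hedged sketch of a RunnableJobFunc that refuses to start a job whose key is already running; it assumes the runner.Job interface exposes GetKey(), as the Db* jobs above do.
func noDuplicateKey[T runner.Job](job T, nextRunning runner.NextJobFunc[T]) (bool, error) {
	for running, ok := nextRunning(); ok; running, ok = nextRunning() {
		if running.GetKey() == job.GetKey() {
			return false, nil // a job with the same key is already running, wait
		}
	}
	return true, nil
}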

View File

@@ -50,6 +50,7 @@ CREATE TABLE IF NOT EXISTS "t_db_backup" (
"db_name" text(64) NOT NULL, "db_name" text(64) NOT NULL,
"repeated" integer(1), "repeated" integer(1),
"interval" integer(20), "interval" integer(20),
"max_save_days" integer(8) NOT NULL DEFAULT '0',
"start_time" datetime, "start_time" datetime,
"enabled" integer(1), "enabled" integer(1),
"enabled_desc" text(64), "enabled_desc" text(64),
@@ -81,8 +82,8 @@ CREATE TABLE IF NOT EXISTS "t_db_backup_history" (
"create_time" datetime, "create_time" datetime,
"is_deleted" integer(1) NOT NULL, "is_deleted" integer(1) NOT NULL,
"delete_time" datetime, "delete_time" datetime,
"restoring" integer(1), "restoring" integer(1) NOT NULL DEFAULT '0',
"deleting" integer(1), "deleting" integer(1) NOT NULL DEFAULT '0',
PRIMARY KEY ("id") PRIMARY KEY ("id")
); );
@@ -112,6 +113,7 @@ CREATE TABLE IF NOT EXISTS "t_db_binlog_history" (
"file_size" integer(20), "file_size" integer(20),
"sequence" integer(20), "sequence" integer(20),
"first_event_time" datetime, "first_event_time" datetime,
"last_event_time" datetime,
"create_time" datetime, "create_time" datetime,
"is_deleted" integer(4) NOT NULL, "is_deleted" integer(4) NOT NULL,
"delete_time" datetime, "delete_time" datetime,
@@ -830,8 +832,8 @@ INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight,
INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (153, 150, 'Jra0n7De/pLOA2UYz/', 2, 1, '删除', 'db:sync:del', 1703641342, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:42:22', '2023-12-27 09:42:22', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (153, 150, 'Jra0n7De/pLOA2UYz/', 2, 1, '删除', 'db:sync:del', 1703641342, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:42:22', '2023-12-27 09:42:22', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (154, 150, 'Jra0n7De/VBt68CDx/', 2, 1, '启停', 'db:sync:status', 1703641364, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:42:45', '2023-12-27 09:42:45', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (154, 150, 'Jra0n7De/VBt68CDx/', 2, 1, '启停', 'db:sync:status', 1703641364, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:42:45', '2023-12-27 09:42:45', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (155, 150, 'Jra0n7De/PigmSGVg/', 2, 1, '日志', 'db:sync:log', 1704266866, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-01-03 15:27:47', '2024-01-03 15:27:47', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (155, 150, 'Jra0n7De/PigmSGVg/', 2, 1, '日志', 'db:sync:log', 1704266866, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-01-03 15:27:47', '2024-01-03 15:27:47', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (161, 49, 'dbms23ax/xleaiec2/3NUXQFIO/', 2, 1, '数据库备份', 'db:backup', 1705973876, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:37:56', '2024-01-23 09:37:56', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (160, 49, 'dbms23ax/xleaiec2/3NUXQFIO/', 2, 1, '数据库备份', 'db:backup', 1705973876, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:37:56', '2024-01-23 09:37:56', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (160, 49, 'dbms23ax/xleaiec2/ghErkTdb/', 2, 1, '数据库恢复', 'db:restore', 1705973909, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:38:29', '2024-01-23 09:38:29', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, type, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES (161, 49, 'dbms23ax/xleaiec2/ghErkTdb/', 2, 1, '数据库恢复', 'db:restore', 1705973909, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:38:29', '2024-01-23 09:38:29', 0, NULL);
-- Table: t_sys_role -- Table: t_sys_role
CREATE TABLE IF NOT EXISTS "t_sys_role" ( CREATE TABLE IF NOT EXISTS "t_sys_role" (

View File

@@ -108,6 +108,7 @@ CREATE TABLE `t_db_backup` (
`db_name` varchar(64) NOT NULL COMMENT '数据库名称', `db_name` varchar(64) NOT NULL COMMENT '数据库名称',
`repeated` tinyint(1) DEFAULT NULL COMMENT '是否重复执行', `repeated` tinyint(1) DEFAULT NULL COMMENT '是否重复执行',
`interval` bigint(20) DEFAULT NULL COMMENT '备份周期', `interval` bigint(20) DEFAULT NULL COMMENT '备份周期',
`max_save_days` int(8) NOT NULL DEFAULT '0' COMMENT '最大保留天数',
`start_time` datetime DEFAULT NULL COMMENT '首次备份时间', `start_time` datetime DEFAULT NULL COMMENT '首次备份时间',
`enabled` tinyint(1) DEFAULT NULL COMMENT '是否启用', `enabled` tinyint(1) DEFAULT NULL COMMENT '是否启用',
`enabled_desc` varchar(64) NULL COMMENT '任务启用描述', `enabled_desc` varchar(64) NULL COMMENT '任务启用描述',
@@ -144,8 +145,8 @@ CREATE TABLE `t_db_backup_history` (
`create_time` datetime DEFAULT NULL COMMENT '历史备份创建时间', `create_time` datetime DEFAULT NULL COMMENT '历史备份创建时间',
`is_deleted` tinyint(1) NOT NULL DEFAULT 0, `is_deleted` tinyint(1) NOT NULL DEFAULT 0,
`delete_time` datetime DEFAULT NULL, `delete_time` datetime DEFAULT NULL,
`restoring` int(1) NOT NULL DEFAULT '0' COMMENT '备份历史恢复标识', `restoring` tinyint(1) NOT NULL DEFAULT '0' COMMENT '备份历史恢复标识',
`deleting` int(1) NOT NULL DEFAULT '0' COMMENT '备份历史删除标识', `deleting` tinyint(1) NOT NULL DEFAULT '0' COMMENT '备份历史删除标识',
PRIMARY KEY (`id`), PRIMARY KEY (`id`),
KEY `idx_db_backup_id` (`db_backup_id`) USING BTREE, KEY `idx_db_backup_id` (`db_backup_id`) USING BTREE,
KEY `idx_db_instance_id` (`db_instance_id`) USING BTREE, KEY `idx_db_instance_id` (`db_instance_id`) USING BTREE,
@@ -232,6 +233,7 @@ CREATE TABLE `t_db_binlog_history` (
`file_size` bigint(20) DEFAULT NULL COMMENT 'BINLOG文件大小', `file_size` bigint(20) DEFAULT NULL COMMENT 'BINLOG文件大小',
`sequence` bigint(20) DEFAULT NULL COMMENT 'BINLOG序列号', `sequence` bigint(20) DEFAULT NULL COMMENT 'BINLOG序列号',
`first_event_time` datetime DEFAULT NULL COMMENT '首次事件时间', `first_event_time` datetime DEFAULT NULL COMMENT '首次事件时间',
`last_event_time` datetime DEFAULT NULL COMMENT '最新事件时间',
`create_time` datetime DEFAULT NULL, `create_time` datetime DEFAULT NULL,
`is_deleted` tinyint(4) NOT NULL DEFAULT 0, `is_deleted` tinyint(4) NOT NULL DEFAULT 0,
`delete_time` datetime DEFAULT NULL, `delete_time` datetime DEFAULT NULL,
@@ -792,8 +794,8 @@ INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(152, 150, 'Jra0n7De/zvAMo2vk/', 2, 1, '编辑', 'db:sync:save', 1703641320, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:42:00', '2023-12-27 09:42:12', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(152, 150, 'Jra0n7De/zvAMo2vk/', 2, 1, '编辑', 'db:sync:save', 1703641320, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:42:00', '2023-12-27 09:42:12', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(151, 150, 'Jra0n7De/uAnHZxEV/', 2, 1, '基本权限', 'db:sync', 1703641202, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:40:02', '2023-12-27 09:40:02', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(151, 150, 'Jra0n7De/uAnHZxEV/', 2, 1, '基本权限', 'db:sync', 1703641202, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-27 09:40:02', '2023-12-27 09:40:02', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(150, 36, 'Jra0n7De/', 1, 1, '数据同步', 'sync', 1693040707, '{"component":"ops/db/SyncTaskList","icon":"Coin","isKeepAlive":true,"routeName":"SyncTaskList"}', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-22 09:51:34', '2023-12-27 10:16:57', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(150, 36, 'Jra0n7De/', 1, 1, '数据同步', 'sync', 1693040707, '{"component":"ops/db/SyncTaskList","icon":"Coin","isKeepAlive":true,"routeName":"SyncTaskList"}', 12, 'liuzongyang', 12, 'liuzongyang', '2023-12-22 09:51:34', '2023-12-27 10:16:57', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(161, 49, 'dbms23ax/xleaiec2/3NUXQFIO/', 2, 1, '数据库备份', 'db:backup', 1705973876, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:37:56', '2024-01-23 09:37:56', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(160, 49, 'dbms23ax/xleaiec2/3NUXQFIO/', 2, 1, '数据库备份', 'db:backup', 1705973876, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:37:56', '2024-01-23 09:37:56', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(160, 49, 'dbms23ax/xleaiec2/ghErkTdb/', 2, 1, '数据库恢复', 'db:restore', 1705973909, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:38:29', '2024-01-23 09:38:29', 0, NULL); INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(161, 49, 'dbms23ax/xleaiec2/ghErkTdb/', 2, 1, '数据库恢复', 'db:restore', 1705973909, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:38:29', '2024-01-23 09:38:29', 0, NULL);
COMMIT; COMMIT;
-- ---------------------------- -- ----------------------------

View File

@@ -1,6 +1,5 @@
INSERT INTO `t_sys_resource` (`id`, `pid`, `ui_path`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `is_deleted`, `delete_time`) INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(160, 49, 'dbms23ax/xleaiec2/3NUXQFIO/', 2, 1, '数据库备份', 'db:backup', 1705973876, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:37:56', '2024-01-23 09:37:56', 0, NULL);
VALUES (161, 49, 'dbms23ax/xleaiec2/3NUXQFIO/', 2, 1, '数据库备份', 'db:backup', 1705973876, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:37:56', '2024-01-23 09:37:56', 0, NULL), INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(161, 49, 'dbms23ax/xleaiec2/ghErkTdb/', 2, 1, '数据库恢复', 'db:restore', 1705973909, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:38:29', '2024-01-23 09:38:29', 0, NULL);
(160, 49, 'dbms23ax/xleaiec2/ghErkTdb/', 2, 1, '数据库恢复', 'db:restore', 1705973909, 'null', 1, 'admin', 1, 'admin', '2024-01-23 09:38:29', '2024-01-23 09:38:29', 0, NULL);
ALTER TABLE `t_db_backup` ALTER TABLE `t_db_backup`
ADD COLUMN `enabled_desc` varchar(64) NULL COMMENT '任务启用描述' AFTER `enabled`; ADD COLUMN `enabled_desc` varchar(64) NULL COMMENT '任务启用描述' AFTER `enabled`;
@@ -9,5 +8,5 @@ ALTER TABLE `t_db_restore`
ADD COLUMN `enabled_desc` varchar(64) NULL COMMENT '任务启用描述' AFTER `enabled`; ADD COLUMN `enabled_desc` varchar(64) NULL COMMENT '任务启用描述' AFTER `enabled`;
ALTER TABLE `t_db_backup_history` ALTER TABLE `t_db_backup_history`
ADD COLUMN `restoring` int(1) NOT NULL DEFAULT '0' COMMENT '备份历史恢复标识', ADD COLUMN `restoring` tinyint(1) NOT NULL DEFAULT '0' COMMENT '备份历史恢复标识',
ADD COLUMN `deleting` int(1) NOT NULL DEFAULT '0' COMMENT '备份历史删除标识'; ADD COLUMN `deleting` tinyint(1) NOT NULL DEFAULT '0' COMMENT '备份历史删除标识';

View File

@@ -0,0 +1,5 @@
ALTER TABLE `t_db_backup`
ADD COLUMN `max_save_days` int(8) NOT NULL DEFAULT '0' COMMENT '最大保存天数' AFTER `interval`;
ALTER TABLE `t_db_binlog_history`
ADD COLUMN `last_event_time` datetime NULL DEFAULT NULL COMMENT '最新事件时间' AFTER `first_event_time`;