!124 Some updates and bug fixes

* fix: code merge
* feat: support database version compatibility; currently covers some Oracle 11g-specific behaviour
* fix: data sync bug: when the data SQL gives the update field an alias, the updated field value was not recorded correctly
* feat: database transfer supports scheduled (cron) runs and transferring to a SQL file
This commit is contained in:
zongyangleo
2024-10-20 03:52:23 +00:00
committed by Coder慌
parent 6837a9c867
commit 6343173cf8
68 changed files with 3319 additions and 1587 deletions

View File

@@ -96,7 +96,6 @@ require (
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
modernc.org/libc v1.22.5 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.5.0 // indirect

View File

@@ -25,6 +25,7 @@ import (
"mayfly-go/pkg/utils/collx"
"mayfly-go/pkg/utils/cryptox"
"mayfly-go/pkg/utils/stringx"
"mayfly-go/pkg/utils/writer"
"mayfly-go/pkg/ws"
"strings"
"time"
@@ -257,7 +258,7 @@ func (d *Db) DumpSql(rc *req.Ctx) {
Tables: tables,
DumpDDL: needStruct,
DumpData: needData,
Writer: rc.GetWriter(),
Writer: writer.NewGzipWriter(rc.GetWriter()),
}))
rc.ReqParam = collx.Kvs("db", dbConn.Info, "database", dbName, "tables", tablesStr, "dumpType", dumpType)
@@ -338,6 +339,11 @@ func (d *Db) GetTableDDL(rc *req.Ctx) {
rc.ResData = res
}
func (d *Db) GetVersion(rc *req.Ctx) {
version := d.getDbConn(rc).GetMetaData().GetCompatibleDbVersion()
rc.ResData = version
}
func (d *Db) GetSchemas(rc *req.Ctx) {
res, err := d.getDbConn(rc).GetMetaData().GetSchemas()
biz.ErrIsNilAppendErr(err, "获取schemas失败: %s")

View File

@@ -1,24 +1,54 @@
package api
import (
"fmt"
"io"
"mayfly-go/internal/db/api/form"
"mayfly-go/internal/db/api/vo"
"mayfly-go/internal/db/application"
"mayfly-go/internal/db/config"
"mayfly-go/internal/db/dbm/dbi"
"mayfly-go/internal/db/dbm/sqlparser"
"mayfly-go/internal/db/domain/entity"
msgapp "mayfly-go/internal/msg/application"
msgdto "mayfly-go/internal/msg/application/dto"
tagapp "mayfly-go/internal/tag/application"
"mayfly-go/pkg/biz"
"mayfly-go/pkg/errorx"
"mayfly-go/pkg/model"
"mayfly-go/pkg/req"
"mayfly-go/pkg/utils/anyx"
"mayfly-go/pkg/utils/stringx"
"mayfly-go/pkg/ws"
"os"
"path/filepath"
"strconv"
"strings"
"time"
)
type DbTransferTask struct {
DbTransferTask application.DbTransferTask `inject:"DbTransferTaskApp"`
DbTransferFile application.DbTransferFile `inject:"DbTransferFileApp"`
DbApp application.Db `inject:""`
TagApp tagapp.TagTree `inject:"TagTreeApp"`
MsgApp msgapp.Msg `inject:""`
DbSqlExecApp application.DbSqlExec `inject:""`
}
func (d *DbTransferTask) Tasks(rc *req.Ctx) {
queryCond, page := req.BindQueryAndPage[*entity.DbTransferTaskQuery](rc, new(entity.DbTransferTaskQuery))
res, err := d.DbTransferTask.GetPageList(queryCond, page, new([]vo.DbTransferTaskListVO))
biz.ErrIsNil(err)
list := res.List.(*[]vo.DbTransferTaskListVO)
for _, item := range *list {
item.RunningState = entity.DbTransferTaskRunStateSuccess
if d.DbTransferTask.IsRunning(item.Id) {
item.RunningState = entity.DbTransferTaskRunStateRunning
}
}
rc.ResData = res
}
@@ -35,11 +65,27 @@ func (d *DbTransferTask) DeleteTask(rc *req.Ctx) {
rc.ReqParam = taskId
ids := strings.Split(taskId, ",")
uids := make([]uint64, len(ids))
for _, v := range ids {
value, err := strconv.Atoi(v)
biz.ErrIsNilAppendErr(err, "string类型转换为int异常: %s")
biz.ErrIsNil(d.DbTransferTask.Delete(rc.MetaCtx, uint64(value)))
uids = append(uids, uint64(value))
}
biz.ErrIsNil(d.DbTransferTask.DeleteById(rc.MetaCtx, uids...))
}
func (d *DbTransferTask) ChangeStatus(rc *req.Ctx) {
form := &form.DbTransferTaskStatusForm{}
task := req.BindJsonAndCopyTo[*entity.DbTransferTask](rc, form, new(entity.DbTransferTask))
_ = d.DbTransferTask.UpdateById(rc.MetaCtx, task)
task, err := d.DbTransferTask.GetById(task.Id)
biz.ErrIsNil(err, "该任务不存在")
d.DbTransferTask.AddCronJob(rc.MetaCtx, task)
// 记录请求日志
rc.ReqParam = form
}
func (d *DbTransferTask) Run(rc *req.Ctx) {
@@ -52,3 +98,140 @@ func (d *DbTransferTask) Run(rc *req.Ctx) {
func (d *DbTransferTask) Stop(rc *req.Ctx) {
biz.ErrIsNil(d.DbTransferTask.Stop(rc.MetaCtx, uint64(rc.PathParamInt("taskId"))))
}
func (d *DbTransferTask) Files(rc *req.Ctx) {
queryCond, page := req.BindQueryAndPage[*entity.DbTransferFileQuery](rc, new(entity.DbTransferFileQuery))
res, err := d.DbTransferFile.GetPageList(queryCond, page, new([]vo.DbTransferFileListVO))
biz.ErrIsNil(err)
rc.ResData = res
}
func (d *DbTransferTask) FileRename(rc *req.Ctx) {
fm := &form.DbTransferFileForm{}
tFile := req.BindJsonAndCopyTo[*entity.DbTransferFile](rc, fm, new(entity.DbTransferFile))
_ = d.DbTransferFile.UpdateById(rc.MetaCtx, tFile)
rc.ReqParam = fm
}
func (d *DbTransferTask) FileDel(rc *req.Ctx) {
fileId := rc.PathParam("fileId")
rc.ReqParam = fileId // 记录操作日志
ids := strings.Split(fileId, ",")
uIds := make([]uint64, len(ids))
for _, v := range ids {
value, err := strconv.Atoi(v)
biz.ErrIsNilAppendErr(err, "string类型转换为int异常: %s")
uIds = append(uIds, uint64(value))
}
biz.ErrIsNil(d.DbTransferFile.Delete(rc.MetaCtx, uIds...))
}
func (d *DbTransferTask) FileDown(rc *req.Ctx) {
fileUuid := rc.PathParam("fileUuid")
if fileUuid == "" {
panic(errorx.NewBiz("文件id不能为空"))
}
tFile := &entity.DbTransferFile{FileUuid: fileUuid}
err := d.DbTransferFile.GetByCond(model.NewModelCond(tFile).Dest(tFile))
biz.ErrIsNilAppendErr(err, "查询文件出错 %s")
// 拼接文件地址,并把文件流输出到客户端
brc := config.GetDbBackupRestore()
filePath := filepath.Join(fmt.Sprintf("%s/%d/%s.sql", brc.TransferPath, tFile.TaskId, fileUuid))
file, err := os.Open(filePath)
biz.ErrIsNilAppendErr(err, "读取文件失败:%s")
defer file.Close()
// Get the file information to set the correct response headers
fileInfo, err := file.Stat()
biz.ErrIsNilAppendErr(err, "读取文件失败:%s")
rc.ReqParam = tFile // 记录操作日志
// 如果文件名不以 .sql 结尾,则加上 .sql
if !strings.HasSuffix(tFile.FileName, ".sql") {
tFile.FileName += ".sql"
}
rc.Header("Content-Type", "application/octet-stream")
rc.Header("Content-Disposition", "attachment; filename="+tFile.FileName)
rc.Header("Content-Length", strconv.FormatInt(fileInfo.Size(), 10))
_, err = io.Copy(rc.GetWriter(), file)
}
func (d *DbTransferTask) FileRun(rc *req.Ctx) {
fm := req.BindJsonAndValid(rc, &form.DbTransferFileRunForm{})
rc.ReqParam = fm
tFile, err := d.DbTransferFile.GetById(fm.Id)
biz.IsTrue(tFile != nil && err == nil, "文件不存在")
targetDbConn, err := d.DbApp.GetDbConn(fm.TargetDbId, fm.TargetDbName)
biz.ErrIsNilAppendErr(err, "连接目标数据库失败: %s")
biz.ErrIsNilAppendErr(d.TagApp.CanAccess(rc.GetLoginAccount().Id, targetDbConn.Info.CodePath...), "%s")
defer func() {
if err := recover(); err != nil {
errInfo := anyx.ToString(err)
if len(errInfo) > 300 {
errInfo = errInfo[:300] + "..."
}
d.MsgApp.CreateAndSend(rc.GetLoginAccount(), msgdto.ErrSysMsg("sql脚本执行失败", fmt.Sprintf("[%s][%s]执行失败: [%s]", tFile.FileName, targetDbConn.Info.GetLogDesc(), errInfo)).WithClientId(fm.ClientId))
}
}()
go func() {
d.fileRun(rc.GetLoginAccount(), fm, tFile, targetDbConn)
}()
}
func (d *DbTransferTask) fileRun(la *model.LoginAccount, fm *form.DbTransferFileRunForm, tFile *entity.DbTransferFile, targetDbConn *dbi.DbConn) {
filePath := d.DbTransferFile.GetFilePath(tFile)
_, err := os.Stat(filePath)
biz.ErrIsNilAppendErr(err, "sql文件不存在%s")
file, err := os.Open(filePath)
biz.ErrIsNilAppendErr(err, "sql文件读取出错%s")
executedStatements := 0
progressId := stringx.Rand(32)
laId := la.Id
ticker := time.NewTicker(time.Second * 1)
defer ticker.Stop()
if err != nil {
biz.ErrIsNilAppendErr(err, "连接目标数据库失败: %s")
}
err = sqlparser.SQLSplit(file, func(sql string) error {
select {
case <-ticker.C:
ws.SendJsonMsg(ws.UserId(laId), fm.ClientId, msgdto.InfoSqlProgressMsg("sql脚本执行进度", &progressMsg{
Id: progressId,
Title: tFile.FileName,
ExecutedStatements: executedStatements,
Terminated: false,
}).WithCategory(progressCategory))
default:
}
executedStatements++
_, err = targetDbConn.Exec(sql)
return err
})
if err != nil {
biz.ErrIsNilAppendErr(err, "执行sql失败: %s")
}
d.MsgApp.CreateAndSend(la, msgdto.SuccessSysMsg("sql脚本执行成功", fmt.Sprintf("sql脚本执行完成%s", tFile.FileName)).WithClientId(fm.ClientId))
}
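The fileRun flow above splits the exported SQL file into individual statements with sqlparser.SQLSplit and throttles WebSocket progress messages through a one-second ticker. A minimal, self-contained sketch of that pattern, assuming a naive semicolon splitter (execStmt and reportProgress are illustrative stand-ins for targetDbConn.Exec and the ws progress message):

package main

import (
    "fmt"
    "os"
    "strings"
    "time"
)

// execStmt and reportProgress are illustrative stand-ins for
// targetDbConn.Exec and the ws.SendJsonMsg progress notification above.
func execStmt(sql string) error { fmt.Println("exec:", sql); return nil }
func reportProgress(n int)      { fmt.Println("executed statements:", n) }

func runSqlFile(path string) error {
    data, err := os.ReadFile(path)
    if err != nil {
        return err
    }
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()

    executed := 0
    // Naive split on ';' for illustration only; the handler above uses
    // sqlparser.SQLSplit, which understands quoting and statement delimiters.
    for _, stmt := range strings.Split(string(data), ";") {
        if stmt = strings.TrimSpace(stmt); stmt == "" {
            continue
        }
        select {
        case <-ticker.C: // throttle: at most one progress message per second
            reportProgress(executed)
        default:
        }
        executed++
        if err := execStmt(stmt); err != nil {
            return err
        }
    }
    return nil
}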

View File

@@ -14,6 +14,7 @@ type DataSyncTaskForm struct {
PageSize int `binding:"required" json:"pageSize"`
UpdField string `binding:"required" json:"updField"`
UpdFieldVal string `binding:"required" json:"updFieldVal"`
UpdFieldSrc string `json:"updFieldSrc"`
TargetDbId int64 `binding:"required" json:"targetDbId"`
TargetDbName string `binding:"required" json:"targetDbName"`

View File

@@ -1,20 +1,43 @@
package form
type DbTransferTaskForm struct {
Id uint64 `json:"id"`
TaskName string `binding:"required" json:"taskName"` // 任务名称
CheckedKeys string `binding:"required" json:"checkedKeys"` // 选中需要迁移的表
DeleteTable int `binding:"required" json:"deleteTable"` // 创建表前是否删除表 1是 2否
NameCase int `binding:"required" json:"nameCase"` // 表名、字段大小写转换 1无 2大写 3小写
Strategy int `binding:"required" json:"strategy"` // 迁移策略 1全量 2增量
SrcDbId int `binding:"required" json:"srcDbId"` // 源库id
SrcDbName string `binding:"required" json:"srcDbName"` // 源库名
SrcDbType string `binding:"required" json:"srcDbType"` // 源库类型
SrcInstName string `binding:"required" json:"srcInstName"` // 源库实例名
SrcTagPath string `binding:"required" json:"srcTagPath"` // 源库tagPath
TargetDbId int `binding:"required" json:"targetDbId"` // 目标库id
TargetDbName string `binding:"required" json:"targetDbName"` // 目标库名
TargetDbType string `binding:"required" json:"targetDbType"` // 目标库类型
TargetInstName string `binding:"required" json:"targetInstName"` // 目标库实例名
TargetTagPath string `binding:"required" json:"targetTagPath"` // 目标库tagPath
Id uint64 `json:"id"`
TaskName string `binding:"required" json:"taskName"` // 任务名称
CronAble int `json:"cronAble"` // 是否定时 1是 -1否
Cron string `json:"cron"` // 定时任务cron表达式
Mode int `binding:"required" json:"mode"` // 数据迁移方式1、迁移到数据库 2、迁移到文件
TargetFileDbType string `json:"targetFileDbType"` // 目标文件数据库类型
Status int `json:"status" form:"status"` // 启用状态 1启用 -1禁用
CheckedKeys string `binding:"required" json:"checkedKeys"` // 选中需要迁移的表
DeleteTable int `binding:"required" json:"deleteTable"` // 创建表前是否删除表 1是 2否
NameCase int `binding:"required" json:"nameCase"` // 表名、字段大小写转换 1无 2大写 3小写
Strategy int `binding:"required" json:"strategy"` // 迁移策略 1全量 2增量
SrcDbId int `binding:"required" json:"srcDbId"` // 源库id
SrcDbName string `binding:"required" json:"srcDbName"` // 源库名
SrcDbType string `binding:"required" json:"srcDbType"` // 源库类型
SrcInstName string `binding:"required" json:"srcInstName"` // 源库实例名
SrcTagPath string `binding:"required" json:"srcTagPath"` // 源库tagPath
TargetDbId int `json:"targetDbId"` // 目标库id
TargetDbName string `json:"targetDbName"` // 目标库名
TargetDbType string `json:"targetDbType"` // 目标库类型
TargetInstName string `json:"targetInstName"` // 目标库实例名
TargetTagPath string `json:"targetTagPath"` // 目标库tagPath
}
type DbTransferTaskStatusForm struct {
Id uint64 `binding:"required" json:"taskId" form:"taskId"`
Status int `json:"status" form:"status"`
}
type DbTransferFileForm struct {
Id uint64 `json:"id"`
FileName string `json:"fileName" form:"fileName"`
}
type DbTransferFileRunForm struct {
Id uint64 `json:"id"` // 文件ID
TargetDbId uint64 `json:"targetDbId" form:"targetDbId"` // 需要执行sql的数据库id
TargetDbName string `json:"targetDbName" form:"targetDbName"` // 需要执行sql的数据库名
ClientId string `json:"clientId" form:"clientId"` // 客户端的唯一id用于消息回传
}
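For reference, a request that creates a scheduled export-to-file task populates the new fields roughly as below; since Mode 2 writes a SQL file, the target database fields (no longer marked binding:"required") can stay empty. All values are illustrative:

form := DbTransferTaskForm{
    TaskName:         "orders-nightly-export",
    CronAble:         1,             // 1 scheduled, -1 manual only
    Cron:             "0 0 2 * * ?", // expression format depends on the scheduler package
    Mode:             2,             // 1 transfer to database, 2 transfer to SQL file
    TargetFileDbType: "oracle",      // dialect used when generating the file
    Status:           1,             // 1 enabled, -1 disabled
    CheckedKeys:      "all",
    DeleteTable:      2,
    NameCase:         1,
    Strategy:         1,
    SrcDbId:          10,
    SrcDbName:        "shop",
    SrcDbType:        "mysql",
    SrcInstName:      "mysql-prod",
    SrcTagPath:       "default/mysql-prod",
}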

View File

@@ -1,17 +1,24 @@
package vo
import "time"
import (
"time"
)
type DbTransferTaskListVO struct {
Id *int64 `json:"id"`
Id uint64 `json:"id"`
CreateTime *time.Time `json:"createTime"`
Creator string `json:"creator"`
UpdateTime *time.Time `json:"updateTime"`
Modifier string `json:"modifier"`
RunningState int `json:"runningState"`
LogId uint64 `json:"logId"`
TaskName string `json:"taskName"` // 任务名称
RunningState int8 `json:"runningState"`
LogId uint64 `json:"logId"`
TaskName string `json:"taskName"` // 任务名称
Status int `json:"status"` // 任务状态 1启用 -1禁用
CronAble int `json:"cronAble"` // 是否定时 1是 -1否
Cron string `json:"cron"` // 定时任务cron表达式
Mode int `json:"mode"` // 数据迁移方式1、迁移到数据库 2、迁移到文件
TargetFileDbType string `json:"targetFileDbType"` // 目标文件数据库类型
CheckedKeys string `json:"checkedKeys"` // 选中需要迁移的表
DeleteTable int `json:"deleteTable"` // 创建表前是否删除表

View File

@@ -0,0 +1,13 @@
package vo
import "time"
type DbTransferFileListVO struct {
Id *int64 `json:"id"`
CreateTime *time.Time `json:"createTime"`
Status int8 `json:"status"`
FileDbType string `json:"fileDbType"`
FileName string `json:"fileName"`
FileUuid string `json:"fileUuid"`
LogId uint64 `json:"logId"` // 日志ID
}

View File

@@ -1,7 +1,6 @@
package application
import (
"fmt"
"mayfly-go/pkg/ioc"
"sync"
)
@@ -13,6 +12,7 @@ func InitIoc() {
ioc.Register(new(dbSqlAppImpl), ioc.WithComponentName("DbSqlApp"))
ioc.Register(new(dataSyncAppImpl), ioc.WithComponentName("DbDataSyncTaskApp"))
ioc.Register(new(dbTransferAppImpl), ioc.WithComponentName("DbTransferTaskApp"))
ioc.Register(new(dbTransferFileAppImpl), ioc.WithComponentName("DbTransferFileApp"))
ioc.Register(newDbScheduler(), ioc.WithComponentName("DbScheduler"))
ioc.Register(new(DbBackupApp), ioc.WithComponentName("DbBackupApp"))
@@ -22,17 +22,17 @@ func InitIoc() {
func Init() {
sync.OnceFunc(func() {
if err := GetDbBackupApp().Init(); err != nil {
panic(fmt.Sprintf("初始化 DbBackupApp 失败: %v", err))
}
if err := GetDbRestoreApp().Init(); err != nil {
panic(fmt.Sprintf("初始化 DbRestoreApp 失败: %v", err))
}
if err := GetDbBinlogApp().Init(); err != nil {
panic(fmt.Sprintf("初始化 DbBinlogApp 失败: %v", err))
}
//if err := GetDbBackupApp().Init(); err != nil {
// panic(fmt.Sprintf("初始化 DbBackupApp 失败: %v", err))
//}
//if err := GetDbRestoreApp().Init(); err != nil {
// panic(fmt.Sprintf("初始化 DbRestoreApp 失败: %v", err))
//}
//if err := GetDbBinlogApp().Init(); err != nil {
// panic(fmt.Sprintf("初始化 DbBinlogApp 失败: %v", err))
//}
GetDataSyncTaskApp().InitCronJob()
GetDbTransferTaskApp().InitJob()
GetDbTransferTaskApp().InitCronJob()
InitDbFlowHandler()
})()
}

View File

@@ -1,6 +1,7 @@
package application
import (
"cmp"
"context"
"fmt"
"mayfly-go/internal/db/application/dto"
@@ -224,7 +225,13 @@ func (d *dbAppImpl) GetDbConnByInstanceId(instanceId uint64) (*dbi.DbConn, error
}
func (d *dbAppImpl) DumpDb(ctx context.Context, reqParam *dto.DumpDb) error {
writer := newGzipWriter(reqParam.Writer)
log := dto.DefaultDumpLog
if reqParam.Log != nil {
log = reqParam.Log
}
writer := reqParam.Writer
defer writer.Close()
dbId := reqParam.DbId
dbName := reqParam.DbName
@@ -238,23 +245,47 @@ func (d *dbAppImpl) DumpDb(ctx context.Context, reqParam *dto.DumpDb) error {
writer.WriteString("\n-- 导出平台: mayfly-go")
writer.WriteString(fmt.Sprintf("\n-- 导出时间: %s ", time.Now().Format("2006-01-02 15:04:05")))
writer.WriteString(fmt.Sprintf("\n-- 导出数据库: %s ", dbName))
writer.WriteString(fmt.Sprintf("\n-- 数据库方言: %s ", cmp.Or(reqParam.TargetDbType, dbConn.Info.Type)))
writer.WriteString("\n-- ----------------------------\n\n")
dbMeta := dbConn.GetMetaData()
// 获取目标元数据仅生成sql用于生成建表语句和插入数据不能用于查询
targetMeta := dbConn.GetMetaData()
if reqParam.TargetDbType != "" && dbConn.Info.Type != reqParam.TargetDbType {
// 创建一个假连接仅用于调用方言生成sql不做数据库连接操作
meta := dbi.GetMeta(reqParam.TargetDbType)
dbConn := &dbi.DbConn{Info: &dbi.DbInfo{
Type: reqParam.TargetDbType,
Meta: meta,
}}
targetMeta = meta.GetMetaData(dbConn)
}
srcMeta := dbConn.GetMetaData()
if len(tables) == 0 {
ti, err := dbMeta.GetTables()
log("获取可导出的表信息...")
ti, err := srcMeta.GetTables()
if err != nil {
log(fmt.Sprintf("获取表信息失败 %s", err.Error()))
}
biz.ErrIsNil(err)
tables = make([]string, len(ti))
for i, table := range ti {
tables[i] = table.TableName
}
log(fmt.Sprintf("获取到%d张表", len(tables)))
}
if len(tables) == 0 {
log("不存在可导出的表, 结束导出")
return errorx.NewBiz("不存在可导出的表")
}
log("查询列信息...")
// 查询列信息后面生成建表ddl和insert都需要列信息
columns, err := dbMeta.GetColumns(tables...)
columns, err := srcMeta.GetColumns(tables...)
if err != nil {
log(fmt.Sprintf("查询列信息失败:%s", err.Error()))
}
biz.ErrIsNil(err)
// 以表名分组,存放每个表的列信息
@@ -266,21 +297,24 @@ func (d *dbAppImpl) DumpDb(ctx context.Context, reqParam *dto.DumpDb) error {
// 按表名排序
sort.Strings(tables)
quoteSchema := dbMeta.QuoteIdentifier(dbConn.Info.CurrentSchema())
dumpHelper := dbMeta.GetDumpHelper()
dataHelper := dbMeta.GetDataHelper()
quoteSchema := srcMeta.QuoteIdentifier(dbConn.Info.CurrentSchema())
dumpHelper := targetMeta.GetDumpHelper()
dataHelper := targetMeta.GetDataHelper()
// 遍历获取每个表的信息
for _, tableName := range tables {
quoteTableName := dbMeta.QuoteIdentifier(tableName)
log(fmt.Sprintf("获取表[%s]信息...", tableName))
quoteTableName := targetMeta.QuoteIdentifier(tableName)
writer.TryFlush()
// 查询表信息,主要是为了查询表注释
tbs, err := dbMeta.GetTables(tableName)
tbs, err := srcMeta.GetTables(tableName)
if err != nil {
log(fmt.Sprintf("获取表[%s]信息失败: %s", tableName, err.Error()))
return err
}
if len(tbs) <= 0 {
log(fmt.Sprintf("获取表[%s]信息失败: 没有查询到表信息", tableName))
return errorx.NewBiz(fmt.Sprintf("获取表信息失败:%s", tableName))
}
tabInfo := dbi.Table{
@@ -290,8 +324,9 @@ func (d *dbAppImpl) DumpDb(ctx context.Context, reqParam *dto.DumpDb) error {
// 生成表结构信息
if reqParam.DumpDDL {
log(fmt.Sprintf("生成表[%s]DDL...", tableName))
writer.WriteString(fmt.Sprintf("\n-- ----------------------------\n-- 表结构: %s \n-- ----------------------------\n", tableName))
tbDdlArr := dbMeta.GenerateTableDDL(columnMap[tableName], tabInfo, true)
tbDdlArr := targetMeta.GenerateTableDDL(columnMap[tableName], tabInfo, true)
for _, ddl := range tbDdlArr {
writer.WriteString(ddl + ";\n")
}
@@ -299,16 +334,17 @@ func (d *dbAppImpl) DumpDb(ctx context.Context, reqParam *dto.DumpDb) error {
// 生成insert sql数据在索引前加速insert
if reqParam.DumpData {
log(fmt.Sprintf("生成表[%s]DML...", tableName))
writer.WriteString(fmt.Sprintf("\n-- ----------------------------\n-- 表数据: %s \n-- ----------------------------\n", tableName))
dumpHelper.BeforeInsert(writer, quoteTableName)
// 获取列信息
quoteColNames := make([]string, 0)
for _, col := range columnMap[tableName] {
quoteColNames = append(quoteColNames, dbMeta.QuoteIdentifier(col.ColumnName))
quoteColNames = append(quoteColNames, targetMeta.QuoteIdentifier(col.ColumnName))
}
_, _ = dbConn.WalkTableRows(ctx, quoteTableName, func(row map[string]any, _ []*dbi.QueryColumn) error {
_, _ = dbConn.WalkTableRows(ctx, tableName, func(row map[string]any, _ []*dbi.QueryColumn) error {
rowValues := make([]string, len(columnMap[tableName]))
for i, col := range columnMap[tableName] {
rowValues[i] = dataHelper.WrapValue(row[col.ColumnName], dataHelper.GetDataType(string(col.DataType)))
@@ -323,15 +359,18 @@ func (d *dbAppImpl) DumpDb(ctx context.Context, reqParam *dto.DumpDb) error {
dumpHelper.AfterInsert(writer, tableName, columnMap[tableName])
}
indexs, err := dbMeta.GetTableIndex(tableName)
log(fmt.Sprintf("获取表[%s]索引信息...", tableName))
indexs, err := srcMeta.GetTableIndex(tableName)
if err != nil {
log(fmt.Sprintf("获取表[%s]索引信息失败:%s", tableName, err.Error()))
return err
}
if len(indexs) > 0 {
// 最后添加索引
log(fmt.Sprintf("生成表[%s]索引...", tableName))
writer.WriteString(fmt.Sprintf("\n-- ----------------------------\n-- 表索引: %s \n-- ----------------------------\n", tableName))
sqlArr := dbMeta.GenerateIndexDDL(indexs, tabInfo)
sqlArr := targetMeta.GenerateIndexDDL(indexs, tabInfo)
for _, sqlStr := range sqlArr {
writer.WriteString(sqlStr + ";\n")
}
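DumpDb can now render DDL and INSERT statements in a dialect other than the source connection (TargetDbType) and report progress through the optional Log callback. A hedged sketch of how the transfer-to-file path drives it; ids, names and the output path are illustrative:

package example

import (
    "context"
    "fmt"

    "mayfly-go/internal/db/application"
    "mayfly-go/internal/db/application/dto"
    "mayfly-go/internal/db/dbm/dbi"
    "mayfly-go/pkg/utils/writer"
)

// dumpToOracleFile sketches a cross-dialect dump of a MySQL source into an
// Oracle-flavoured SQL file. Values are illustrative, not taken from this change.
func dumpToOracleFile(ctx context.Context, dbApp application.Db) error {
    return dbApp.DumpDb(ctx, &dto.DumpDb{
        DbId:         10,
        DbName:       "shop",
        TargetDbType: dbi.DbTypeOracle, // generate table DDL/INSERTs with the Oracle dialect
        Tables:       nil,              // nil/empty means "dump all tables"
        DumpDDL:      true,
        DumpData:     true,
        Writer:       writer.NewFileWriter("./db/transfer/10/example.sql"),
        Log:          func(msg string) { fmt.Println(msg) }, // optional; the no-op DefaultDumpLog is used when nil
    })
}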

View File

@@ -52,8 +52,9 @@ type dataSyncAppImpl struct {
}
var (
dateTimeReg = regexp.MustCompile(`^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$`)
whereReg = regexp.MustCompile(`(?i)where`)
dateTimeReg = regexp.MustCompile(`^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$`)
dateTimeIsoReg = regexp.MustCompile(`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.*$`)
whereReg = regexp.MustCompile(`(?i)where`)
)
func (app *dataSyncAppImpl) InjectDbDataSyncTaskRepo(repo repository.DataSyncTask) {
@@ -155,7 +156,7 @@ func (app *dataSyncAppImpl) RunCronJob(ctx context.Context, id uint64) error {
// 判断UpdFieldVal数据类型
var updFieldValType dbi.DataType
if _, err = strconv.Atoi(task.UpdFieldVal); err != nil {
if dateTimeReg.MatchString(task.UpdFieldVal) {
if dateTimeReg.MatchString(task.UpdFieldVal) || dateTimeIsoReg.MatchString(task.UpdFieldVal) {
updFieldValType = dbi.DataTypeDateTime
} else {
updFieldValType = dbi.DataTypeString
@@ -328,13 +329,22 @@ func (app *dataSyncAppImpl) srcData2TargetDb(srcRes []map[string]any, fieldMap [
data = append(data, rowData)
}
// 解决字段大小写问题
updFieldVal := srcRes[len(srcRes)-1][strings.ToUpper(updFieldName)]
if updFieldVal == "" || updFieldVal == nil {
updFieldVal = srcRes[len(srcRes)-1][strings.ToLower(updFieldName)]
setUpdateFieldVal := func(field string) {
// 解决字段大小写问题
updFieldVal := srcRes[len(srcRes)-1][strings.ToUpper(field)]
if updFieldVal == "" || updFieldVal == nil {
updFieldVal = srcRes[len(srcRes)-1][strings.ToLower(field)]
}
task.UpdFieldVal = srcMetaData.GetDataHelper().FormatData(updFieldVal, updFieldType)
}
task.UpdFieldVal = srcMetaData.GetDataHelper().FormatData(updFieldVal, updFieldType)
// 如果指定了更新字段,则以更新字段取值
if task.UpdFieldSrc != "" {
setUpdateFieldVal(task.UpdFieldSrc)
} else {
setUpdateFieldVal(updFieldName)
}
// 获取目标库字段数组
targetWrapColumns := make([]string, 0)
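The setUpdateFieldVal closure above looks the field up under both upper- and lower-case keys (drivers differ) and prefers the new UpdFieldSrc alias when the data SQL renames the update field. A small standalone sketch of that lookup; the helper names are illustrative:

package example

import "strings"

// lookupField resolves a row value whether the driver reports column keys
// in upper or lower case, mirroring setUpdateFieldVal above.
func lookupField(row map[string]any, field string) any {
    if v, ok := row[strings.ToUpper(field)]; ok && v != nil && v != "" {
        return v
    }
    return row[strings.ToLower(field)]
}

// latestUpdateValue picks the alias (updFieldSrc) when one is configured,
// e.g. `select update_time as ut from ...` -> updFieldSrc = "ut".
func latestUpdateValue(rows []map[string]any, updField, updFieldSrc string) any {
    last := rows[len(rows)-1]
    if updFieldSrc != "" {
        return lookupField(last, updFieldSrc)
    }
    return lookupField(last, updField)
}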

View File

@@ -17,14 +17,16 @@ import (
"mayfly-go/pkg/model"
"mayfly-go/pkg/utils/collx"
"mayfly-go/pkg/utils/jsonx"
"os"
"strings"
)
type DbSqlExecReq struct {
DbId uint64
Db string
Sql string // 需要执行的sql支持多条
Remark string // 执行备注
Sql string // 需要执行的sql支持多条
SqlFile *os.File // sql文件
Remark string // 执行备注
DbConn *dbi.DbConn
CheckFlow bool // 是否检查存储审批流程
}

View File

@@ -1,9 +1,12 @@
package application
import (
"cmp"
"context"
"encoding/hex"
"fmt"
"github.com/google/uuid"
"mayfly-go/internal/db/application/dto"
"mayfly-go/internal/db/dbm/dbi"
"mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository"
@@ -11,10 +14,14 @@ import (
sysentity "mayfly-go/internal/sys/domain/entity"
"mayfly-go/pkg/base"
"mayfly-go/pkg/cache"
"mayfly-go/pkg/contextx"
"mayfly-go/pkg/errorx"
"mayfly-go/pkg/logx"
"mayfly-go/pkg/model"
"mayfly-go/pkg/scheduler"
"mayfly-go/pkg/utils/collx"
"mayfly-go/pkg/utils/writer"
"os"
"sort"
"strings"
"time"
@@ -32,20 +39,27 @@ type DbTransferTask interface {
Delete(ctx context.Context, id uint64) error
InitJob()
InitCronJob()
AddCronJob(ctx context.Context, taskEntity *entity.DbTransferTask)
RemoveCronJobById(taskId uint64)
CreateLog(ctx context.Context, taskId uint64) (uint64, error)
Run(ctx context.Context, taskId uint64, logId uint64)
IsRunning(taskId uint64) bool
Stop(ctx context.Context, taskId uint64) error
}
type dbTransferAppImpl struct {
base.AppImpl[*entity.DbTransferTask, repository.DbTransferTask]
dbApp Db `inject:"DbApp"`
logApp sysapp.Syslog `inject:"SyslogApp"`
dbApp Db `inject:"DbApp"`
logApp sysapp.Syslog `inject:"SyslogApp"`
fileApp DbTransferFile `inject:"DbTransferFileApp"`
}
func (app *dbTransferAppImpl) InjectDbTransferTaskRepo(repo repository.DbTransferTask) {
@@ -58,23 +72,97 @@ func (app *dbTransferAppImpl) GetPageList(condition *entity.DbTransferTaskQuery,
func (app *dbTransferAppImpl) Save(ctx context.Context, taskEntity *entity.DbTransferTask) error {
var err error
if taskEntity.Id == 0 {
if taskEntity.Id == 0 { // 新建时生成key
taskEntity.TaskKey = uuid.New().String()
err = app.Insert(ctx, taskEntity)
} else {
err = app.UpdateById(ctx, taskEntity)
}
return err
// 视情况添加或删除任务
task, err := app.GetById(taskEntity.Id)
if err != nil {
return err
}
app.AddCronJob(ctx, task)
return nil
}
func (app *dbTransferAppImpl) Delete(ctx context.Context, id uint64) error {
if err := app.DeleteById(ctx, id); err != nil {
return err
}
app.RemoveCronJobById(id)
return nil
}
func (app *dbTransferAppImpl) InitJob() {
app.UpdateByCond(context.TODO(), &entity.DbTransferTask{RunningState: entity.DbTransferTaskRunStateStop}, &entity.DbTransferTask{RunningState: entity.DbTransferTaskRunStateRunning})
func (app *dbTransferAppImpl) AddCronJob(ctx context.Context, taskEntity *entity.DbTransferTask) {
key := taskEntity.TaskKey
// 先移除旧的任务
scheduler.RemoveByKey(key)
// 根据状态添加新的任务
if taskEntity.Status == entity.DbTransferTaskStatusEnable && taskEntity.CronAble == entity.DbTransferTaskCronAbleEnable {
if key == "" {
taskEntity.TaskKey = uuid.New().String()
key = taskEntity.TaskKey
_ = app.UpdateById(ctx, taskEntity)
}
taskId := taskEntity.Id
scheduler.AddFunByKey(key, taskEntity.Cron, func() {
logx.Infof("开始执行同步任务: %d", taskId)
logId, _ := app.CreateLog(ctx, taskId)
app.Run(ctx, taskId, logId)
})
}
}
func (app *dbTransferAppImpl) InitCronJob() {
// 重启后,把正在运行的状态设置为停止
_ = app.UpdateByCond(context.TODO(), &entity.DbTransferTask{RunningState: entity.DbTransferTaskRunStateStop}, &entity.DbTransferTask{RunningState: entity.DbTransferTaskRunStateRunning})
ent := &entity.DbTransferTask{}
list, err := app.ListByCond(model.NewModelCond(ent).Columns("id"))
if err != nil {
return
}
if len(list) > 0 {
// 移除所有正在运行的任务
for _, task := range list {
app.MarkStop(task.Id)
}
}
// 把所有运行中的文件状态设置为失败
_ = app.fileApp.UpdateByCond(context.TODO(), &entity.DbTransferFile{Status: entity.DbTransferFileStatusFail}, &entity.DbTransferFile{Status: entity.DbTransferFileStatusRunning})
// 把所有需要定时执行的任务添加到定时任务中
pageParam := &model.PageParam{
PageSize: 100,
PageNum: 1,
}
cond := new(entity.DbTransferTaskQuery)
cond.Status = entity.DbTransferTaskStatusEnable
cond.CronAble = entity.DbTransferTaskCronAbleEnable
jobs := new([]entity.DbTransferTask)
pr, _ := app.GetPageList(cond, pageParam, jobs)
if nil == pr || pr.Total == 0 {
return
}
total := pr.Total
add := 0
for {
for _, job := range *jobs {
app.AddCronJob(contextx.NewTraceId(), &job)
add++
}
if add >= int(total) {
return
}
pageParam.PageNum++
_, _ = app.GetPageList(cond, pageParam, jobs)
}
}
func (app *dbTransferAppImpl) CreateLog(ctx context.Context, taskId uint64) (uint64, error) {
@@ -88,7 +176,6 @@ func (app *dbTransferAppImpl) CreateLog(ctx context.Context, taskId uint64) (uin
}
func (app *dbTransferAppImpl) Run(ctx context.Context, taskId uint64, logId uint64) {
defer app.logApp.Flush(logId, true)
task, err := app.GetById(taskId)
if err != nil {
@@ -97,7 +184,7 @@ func (app *dbTransferAppImpl) Run(ctx context.Context, taskId uint64, logId uint
}
if app.IsRunning(taskId) {
logx.Warnf("[%d]该任务正在运行中...", taskId)
logx.Panicf("[%d]该任务正在运行中...", taskId)
return
}
@@ -111,8 +198,7 @@ func (app *dbTransferAppImpl) Run(ctx context.Context, taskId uint64, logId uint
}
// 标记该任务开始执行
app.MarkRuning(taskId)
defer app.MarkStop(taskId)
app.MarkRunning(taskId)
// 获取源库连接、目标库连接判断连接可用性否则记录日志xx连接不可用
// 获取源库表信息
@@ -121,15 +207,9 @@ func (app *dbTransferAppImpl) Run(ctx context.Context, taskId uint64, logId uint
app.EndTransfer(ctx, logId, taskId, "获取源库连接失败", err, nil)
return
}
// 获取目标库表信息
targetConn, err := app.dbApp.GetDbConn(uint64(task.TargetDbId), task.TargetDbName)
if err != nil {
app.EndTransfer(ctx, logId, taskId, "获取目标库连接失败", err, nil)
return
}
// 获取迁移表信息
var tables []dbi.Table
if task.CheckedKeys == "all" {
tables, err = srcConn.GetMetaData().GetTables()
if err != nil {
@@ -145,8 +225,28 @@ func (app *dbTransferAppImpl) Run(ctx context.Context, taskId uint64, logId uint
}
}
// 迁移到文件或数据库
if task.Mode == entity.DbTransferTaskModeFile {
app.transfer2File(ctx, taskId, logId, task, srcConn, start, tables)
} else if task.Mode == entity.DbTransferTaskModeDb {
defer app.MarkStop(taskId)
defer app.logApp.Flush(logId, true)
app.transfer2Db(ctx, taskId, logId, task, srcConn, start, tables)
} else {
app.EndTransfer(ctx, logId, taskId, "迁移模式出错,目前仅支持迁移到文件或数据库", err, nil)
return
}
}
func (app *dbTransferAppImpl) transfer2Db(ctx context.Context, taskId uint64, logId uint64, task *entity.DbTransferTask, srcConn *dbi.DbConn, start time.Time, tables []dbi.Table) {
// 获取目标库表信息
targetConn, err := app.dbApp.GetDbConn(uint64(task.TargetDbId), task.TargetDbName)
if err != nil {
app.EndTransfer(ctx, logId, taskId, "获取目标库连接失败", err, nil)
return
}
// 迁移表
if err = app.transferTables(ctx, logId, task, srcConn, targetConn, tables); err != nil {
if err = app.transferDbTables(ctx, logId, task, srcConn, targetConn, tables); err != nil {
app.EndTransfer(ctx, logId, taskId, "迁移表失败", err, nil)
return
}
@@ -154,6 +254,67 @@ func (app *dbTransferAppImpl) Run(ctx context.Context, taskId uint64, logId uint
app.EndTransfer(ctx, logId, taskId, fmt.Sprintf("执行迁移完成,执行迁移任务[taskId = %d]完成, 耗时:%v", taskId, time.Since(start)), nil, nil)
}
func (app *dbTransferAppImpl) transfer2File(ctx context.Context, taskId uint64, logId uint64, task *entity.DbTransferTask, srcConn *dbi.DbConn, start time.Time, tables []dbi.Table) {
// 1、新增迁移文件数据
nowTime := time.Now()
tFile := &entity.DbTransferFile{
TaskId: taskId,
CreateTime: &nowTime,
Status: entity.DbTransferFileStatusRunning,
FileDbType: cmp.Or(task.TargetFileDbType, task.TargetDbType),
FileName: fmt.Sprintf("%s.sql", task.TaskName), // 用于下载和展示
FileUuid: uuid.New().String(), // 用于存放到磁盘
LogId: logId,
}
_ = app.fileApp.Save(ctx, tFile)
// 新建一个文件,文件位置为 {transferPath}/{taskId}/{uuid}.sql
filePath := app.fileApp.GetFilePath(tFile)
// 从tables提取表名
tableNames := make([]string, 0)
for _, table := range tables {
tableNames = append(tableNames, table.TableName)
}
// 2、把源库数据迁移到文件
app.Log(ctx, logId, fmt.Sprintf("开始迁移表数据到文件: %s", filePath))
app.Log(ctx, logId, fmt.Sprintf("目标库文件语言类型: %s", task.TargetFileDbType))
go func() {
defer app.MarkStop(taskId)
defer app.logApp.Flush(logId, true)
ctx = context.Background()
err := app.dbApp.DumpDb(ctx, &dto.DumpDb{
LogId: logId,
DbId: uint64(task.SrcDbId),
DbName: task.SrcDbName,
TargetDbType: dbi.DbType(task.TargetFileDbType),
Tables: tableNames,
DumpDDL: true,
DumpData: true,
Writer: writer.NewFileWriter(filePath),
Log: func(msg string) { // 记录日志
app.Log(ctx, logId, msg)
},
})
if err != nil {
app.EndTransfer(ctx, logId, taskId, "数据库迁移失败", err, nil)
tFile.Status = entity.DbTransferFileStatusFail
_ = app.fileApp.UpdateById(ctx, tFile)
// 删除文件
_ = os.Remove(filePath)
return
}
app.EndTransfer(ctx, logId, taskId, "数据库迁移完成", err, nil)
tFile.Status = entity.DbTransferFileStatusSuccess
_ = app.fileApp.UpdateById(ctx, tFile)
}()
}
func (app *dbTransferAppImpl) Stop(ctx context.Context, taskId uint64) error {
task, err := app.GetById(taskId)
if err != nil {
@@ -173,7 +334,7 @@ func (app *dbTransferAppImpl) Stop(ctx context.Context, taskId uint64) error {
}
// 迁移表
func (app *dbTransferAppImpl) transferTables(ctx context.Context, logId uint64, task *entity.DbTransferTask, srcConn *dbi.DbConn, targetConn *dbi.DbConn, tables []dbi.Table) error {
func (app *dbTransferAppImpl) transferDbTables(ctx context.Context, logId uint64, task *entity.DbTransferTask, srcConn *dbi.DbConn, targetConn *dbi.DbConn, tables []dbi.Table) error {
tableNames := make([]string, 0)
tableMap := make(map[string]dbi.Table) // 以表名分组,存放表信息
for _, table := range tables {
@@ -255,10 +416,7 @@ func (app *dbTransferAppImpl) transferTables(ctx context.Context, logId uint64,
})
}
if err := errGroup.Wait(); err != nil {
return err
}
return nil
return errGroup.Wait()
}
func (app *dbTransferAppImpl) transferData(ctx context.Context, logId uint64, taskId uint64, tableName string, targetColumns []dbi.Column, srcConn *dbi.DbConn, targetConn *dbi.DbConn) (int, error) {
@@ -386,8 +544,8 @@ func (app *dbTransferAppImpl) transferIndex(_ context.Context, tableInfo dbi.Tab
return targetDialect.CreateIndex(tableInfo, indexs)
}
// MarkRuning 标记任务执行中
func (app *dbTransferAppImpl) MarkRuning(taskId uint64) {
// MarkRunning 标记任务执行中
func (app *dbTransferAppImpl) MarkRunning(taskId uint64) {
cache.Set(fmt.Sprintf("mayfly:db:transfer:%d", taskId), 1, -1)
}
@@ -434,3 +592,10 @@ func (app *dbTransferAppImpl) EndTransfer(ctx context.Context, logId uint64, tas
task.RunningState = transferState
app.UpdateById(context.Background(), task)
}
func (app *dbTransferAppImpl) RemoveCronJobById(taskId uint64) {
task, err := app.GetById(taskId)
if err == nil {
scheduler.RemoveByKey(task.TaskKey)
}
}
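AddCronJob keys every transfer schedule by the task's TaskKey UUID: any existing schedule is removed first, and a new one is registered only when the task is both enabled and cron-able, so edits to the cron expression or a disable toggle take effect on the next save. A compact sketch of that lifecycle (runTransfer is an illustrative stand-in for CreateLog + Run):

package example

import (
    "context"

    "mayfly-go/pkg/scheduler"
)

// registerTransferJob mirrors AddCronJob above: one schedule per task key,
// re-registered on every save.
func registerTransferJob(ctx context.Context, taskKey, cron string, enabled bool, runTransfer func(context.Context)) {
    scheduler.RemoveByKey(taskKey) // drop any previously registered schedule
    if !enabled {
        return
    }
    scheduler.AddFunByKey(taskKey, cron, func() {
        runTransfer(ctx)
    })
}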

View File

@@ -0,0 +1,78 @@
package application
import (
"context"
"fmt"
"github.com/google/uuid"
"mayfly-go/internal/db/config"
"mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/base"
"mayfly-go/pkg/model"
"os"
"path/filepath"
)
type DbTransferFile interface {
base.App[*entity.DbTransferFile]
// GetPageList 分页获取数据库实例
GetPageList(condition *entity.DbTransferFileQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error)
Save(ctx context.Context, instanceEntity *entity.DbTransferFile) error
Delete(ctx context.Context, id ...uint64) error
GetFilePath(ent *entity.DbTransferFile) string
}
var _ DbTransferFile = (*dbTransferFileAppImpl)(nil)
type dbTransferFileAppImpl struct {
base.AppImpl[*entity.DbTransferFile, repository.DbTransferFile]
}
func (app *dbTransferFileAppImpl) InjectDbTransferFileRepo(repo repository.DbTransferFile) {
app.Repo = repo
}
func (app *dbTransferFileAppImpl) GetPageList(condition *entity.DbTransferFileQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error) {
return app.GetRepo().GetPageList(condition, pageParam, toEntity, orderBy...)
}
func (app *dbTransferFileAppImpl) Save(ctx context.Context, taskEntity *entity.DbTransferFile) error {
var err error
if taskEntity.Id == 0 {
err = app.Insert(ctx, taskEntity)
} else {
err = app.UpdateById(ctx, taskEntity)
}
return err
}
func (app *dbTransferFileAppImpl) Delete(ctx context.Context, id ...uint64) error {
arr, err := app.GetByIds(id, "task_id", "file_uuid")
if err != nil {
return err
}
// 删除对应的文件
for _, file := range arr {
_ = os.Remove(app.GetFilePath(file))
}
// 删除数据
return app.DeleteById(ctx, id...)
}
func (app *dbTransferFileAppImpl) GetFilePath(ent *entity.DbTransferFile) string {
brc := config.GetDbBackupRestore()
if ent.FileUuid == "" {
ent.FileUuid = uuid.New().String()
}
filePath := filepath.Join(fmt.Sprintf("%s/%d/%s.sql", brc.TransferPath, ent.TaskId, ent.FileUuid))
return filePath
}

View File

@@ -1,9 +1,10 @@
package dto
import (
"io"
"mayfly-go/internal/db/dbm/dbi"
"mayfly-go/internal/db/domain/entity"
tagentity "mayfly-go/internal/tag/domain/entity"
"mayfly-go/pkg/utils/writer"
)
type SaveDbInstance struct {
@@ -19,5 +20,13 @@ type DumpDb struct {
DumpDDL bool // 是否dump ddl
DumpData bool // 是否dump data
Writer io.Writer
LogId uint64
Writer writer.CustomWriter
Log func(msg string)
TargetDbType dbi.DbType
}
func DefaultDumpLog(msg string) {
}

View File

@@ -1,6 +1,7 @@
package config
import (
"cmp"
sysapp "mayfly-go/internal/sys/application"
"path/filepath"
"runtime"
@@ -33,7 +34,8 @@ func GetDbms() *Dbms {
}
type DbBackupRestore struct {
BackupPath string // 备份文件路径呢
BackupPath string // 备份文件路径呢
TransferPath string // 数据库迁移文件存储路径
}
// 获取数据库备份配置
@@ -43,11 +45,8 @@ func GetDbBackupRestore() *DbBackupRestore {
dbrc := new(DbBackupRestore)
backupPath := jm["backupPath"]
if backupPath == "" {
backupPath = "./db/backup"
}
dbrc.BackupPath = filepath.Join(backupPath)
dbrc.BackupPath = filepath.Join(cmp.Or(jm["backupPath"], "./db/backup"))
dbrc.TransferPath = filepath.Join(cmp.Or(jm["transferPath"], "./db/transfer"))
return dbrc
}
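cmp.Or (standard library, Go 1.22+) returns the first non-zero value among its arguments, which is what lets the backup and transfer paths fall back to defaults in a single line. A standalone illustration:

package main

import (
    "cmp"
    "fmt"
    "path/filepath"
)

func main() {
    jm := map[string]string{} // neither path configured

    backupPath := filepath.Join(cmp.Or(jm["backupPath"], "./db/backup"))
    transferPath := filepath.Join(cmp.Or(jm["transferPath"], "./db/transfer"))

    fmt.Println(backupPath)   // db/backup
    fmt.Println(transferPath) // db/transfer
}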

View File

@@ -29,6 +29,10 @@ type QueryColumn struct {
Type string `json:"type"` // 类型
}
func (d *DbConn) GetDb() *sql.DB {
return d.db
}
// 执行查询语句
// 依次返回 列信息数组(顺序)结果map错误
func (d *DbConn) Query(querySql string, args ...any) ([]*QueryColumn, []map[string]any, error) {

View File

@@ -47,6 +47,9 @@ type DbInfo struct {
Params string
Database string // 若有schema的库则为'database/scheam'格式
Version DbVersion // 数据库版本信息,用于语法兼容
DefaultVersion bool // 经过查询数据库版本信息后,是否仍然使用默认版本
CodePath []string
SshTunnelMachineId int

View File

@@ -1,6 +1,8 @@
package dbi
import "database/sql"
import (
"database/sql"
)
var (
metas = make(map[DbType]Meta)
@@ -16,6 +18,8 @@ func GetMeta(dt DbType) Meta {
return metas[dt]
}
type DbVersion string
// 数据库元信息如获取sql.DB、Dialect等
type Meta interface {
// 根据数据库信息获取sql.DB

View File

@@ -16,6 +16,9 @@ type MetaData interface {
// GetDbServer 获取数据库服务实例信息
GetDbServer() (*DbServer, error)
// GetCompatibleDbVersion 获取兼容版本信息,如果有兼容版本,则需要实现对应版本的特殊方言处理器,以及前端的方言兼容版本
GetCompatibleDbVersion() DbVersion
// GetDbNames 获取数据库名称列表
GetDbNames() ([]string, error)

View File

@@ -13,6 +13,8 @@ type BaseMetaData interface {
// 默认库
DefaultDb() string
DbVersion() string
// 用于引用 SQL 标识符(关键字)的字符串
GetIdentifierQuoteString() string
@@ -43,10 +45,17 @@ type BaseMetaData interface {
type DefaultMetaData struct {
}
func (dd *DefaultMetaData) GetCompatibleDbVersion() DbVersion {
return ""
}
func (dd *DefaultMetaData) DefaultDb() string {
return ""
}
func (dd *DefaultMetaData) DbVersion() string {
return ""
}
func (dd *DefaultMetaData) GetIdentifierQuoteString() string {
return `"`
}

View File

@@ -73,3 +73,30 @@ FROM ALL_TAB_COLUMNS a
WHERE a.OWNER = (SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM DUAL)
AND a.TABLE_NAME in (%s)
order by a.COLUMN_ID
---------------------------------------
--ORACLE11_COLUMN_MA 11版本的列信息
SELECT a.TABLE_NAME as TABLE_NAME,
a.COLUMN_NAME as COLUMN_NAME,
case
when a.NULLABLE = 'Y' then 'YES'
when a.NULLABLE = 'N' then 'NO'
else 'NO' end as NULLABLE,
a.DATA_TYPE as DATA_TYPE,
a.DATA_LENGTH as CHAR_MAX_LENGTH,
a.DATA_PRECISION as NUM_PRECISION,
a.DATA_SCALE as NUM_SCALE,
b.COMMENTS as COLUMN_COMMENT,
a.DATA_DEFAULT as COLUMN_DEFAULT,
CASE WHEN d.pri IS NOT NULL THEN 1 ELSE 0 END as IS_PRIMARY_KEY
FROM ALL_TAB_COLUMNS a
LEFT JOIN ALL_COL_COMMENTS b
on a.OWNER = b.OWNER AND a.TABLE_NAME = b.TABLE_NAME AND a.COLUMN_NAME = b.COLUMN_NAME
LEFT JOIN (select ac.TABLE_NAME, ac.OWNER, cc.COLUMN_NAME, 1 as pri
from ALL_CONSTRAINTS ac
join ALL_CONS_COLUMNS cc on cc.CONSTRAINT_NAME = ac.CONSTRAINT_NAME AND cc.OWNER = ac.OWNER
where cc.CONSTRAINT_NAME IS NOT NULL
AND ac.CONSTRAINT_TYPE = 'P') d
on d.OWNER = a.OWNER AND d.TABLE_NAME = a.TABLE_NAME AND d.COLUMN_NAME = a.COLUMN_NAME
WHERE a.OWNER = (SELECT sys_context('USERENV', 'CURRENT_SCHEMA') FROM DUAL)
AND a.TABLE_NAME in (%s)
order by a.COLUMN_ID

View File

@@ -9,13 +9,13 @@ import (
)
func init() {
dbi.Register(dbi.DbTypeDM, new(DmMeta))
dbi.Register(dbi.DbTypeDM, new(Meta))
}
type DmMeta struct {
type Meta struct {
}
func (md *DmMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
func (dm *Meta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
driverName := "dm"
db := d.Database
var dbParam string
@@ -40,11 +40,11 @@ func (md *DmMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
return sql.Open(driverName, dsn)
}
func (md *DmMeta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
func (dm *Meta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
return &DMDialect{dc: conn}
}
func (md *DmMeta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
func (dm *Meta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
return dbi.NewMetaDataX(&DMMetaData{
dc: conn,
})

View File

@@ -11,14 +11,14 @@ import (
)
func init() {
meta := new(MssqlMeta)
meta := new(Meta)
dbi.Register(dbi.DbTypeMssql, meta)
}
type MssqlMeta struct {
type Meta struct {
}
func (md *MssqlMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
func (mm *Meta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
err := d.IfUseSshTunnelChangeIpPort()
if err != nil {
return nil, err
@@ -53,10 +53,10 @@ func (md *MssqlMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
return sql.Open(driverName, dsn)
}
func (md *MssqlMeta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
func (mm *Meta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
return &MssqlDialect{dc: conn}
}
func (md *MssqlMeta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
func (mm *Meta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
return dbi.NewMetaDataX(&MssqlMetaData{dc: conn})
}

View File

@@ -38,7 +38,7 @@ var (
"longblob": dbi.CommonTypeLongblob,
"longtext": dbi.CommonTypeLongtext,
"mediumblob": dbi.CommonTypeBlob,
"mediumtext": dbi.CommonTypeText,
"mediumtext": dbi.CommonTypeMediumtext,
"bit": dbi.CommonTypeBit,
"set": dbi.CommonTypeVarchar,
"smallint": dbi.CommonTypeSmallint,
@@ -60,7 +60,7 @@ var (
dbi.CommonTypeLongtext: "longtext",
dbi.CommonTypeBinary: "binary",
dbi.CommonTypeMediumblob: "blob",
dbi.CommonTypeMediumtext: "text",
dbi.CommonTypeMediumtext: "mediumtext",
dbi.CommonTypeVarbinary: "varbinary",
dbi.CommonTypeInt: "int",
dbi.CommonTypeBit: "bit",

View File

@@ -11,15 +11,15 @@ import (
)
func init() {
meta := new(MysqlMeta)
meta := new(Meta)
dbi.Register(dbi.DbTypeMysql, meta)
dbi.Register(dbi.DbTypeMariadb, meta)
}
type MysqlMeta struct {
type Meta struct {
}
func (md *MysqlMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
func (mm *Meta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
// SSH Conect
if d.SshTunnelMachineId > 0 {
sshTunnelMachine, err := dbi.GetSshTunnel(d.SshTunnelMachineId)
@@ -39,10 +39,10 @@ func (md *MysqlMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
return sql.Open(driverName, dsn)
}
func (md *MysqlMeta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
func (mm *Meta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
return &MysqlDialect{dc: conn}
}
func (md *MysqlMeta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
func (mm *Meta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
return dbi.NewMetaDataX(&MysqlMetaData{dc: conn})
}

View File

@@ -15,6 +15,9 @@ import (
var (
// 数字类型
numberTypeRegexp = regexp.MustCompile(`(?i)int|double|float|number|decimal|byte|bit`)
dateTimeReg = regexp.MustCompile(`^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$`)
dateTimeIsoReg = regexp.MustCompile(`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.*$`)
// 日期时间类型
datetimeTypeRegexp = regexp.MustCompile(`(?i)date|timestamp`)
@@ -83,6 +86,9 @@ func (dc *DataHelper) GetDataType(dbColumnType string) dbi.DataType {
func (dc *DataHelper) FormatData(dbColumnValue any, dataType dbi.DataType) string {
str := anyx.ToString(dbColumnValue)
if dateTimeReg.MatchString(str) || dateTimeIsoReg.MatchString(str) {
dataType = dbi.DataTypeDateTime
}
switch dataType {
// oracle把日期类型数据格式化输出
case dbi.DataTypeDateTime: // "2024-01-02T22:08:22.275697+08:00"
@@ -122,7 +128,7 @@ func (dc *DataHelper) WrapValue(dbColumnValue any, dataType dbi.DataType) string
val = strings.Replace(val, "\n", "\\n", -1)
return fmt.Sprintf("'%s'", val)
case dbi.DataTypeDate, dbi.DataTypeDateTime, dbi.DataTypeTime:
return fmt.Sprintf("to_timestamp('%s', 'yyyy-mm-dd hh24:mi:ss')", dc.FormatData(dbColumnValue, dataType))
return fmt.Sprintf("to_date('%s', 'yyyy-mm-dd hh24:mi:ss')", dc.FormatData(dbColumnValue, dataType))
}
return fmt.Sprintf("'%s'", dbColumnValue)
}
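Two behaviour changes are visible in this helper: ISO-8601 timestamps (the new dateTimeIsoReg) are now treated as date-time values, and date literals are wrapped with to_date rather than to_timestamp. A tiny sketch of the detection step using the same expressions:

package main

import (
    "fmt"
    "regexp"
)

var (
    dateTimeReg    = regexp.MustCompile(`^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$`)
    dateTimeIsoReg = regexp.MustCompile(`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.*$`)
)

// isDateTimeLiteral mirrors the check FormatData/WrapValue perform above
// before formatting a value as an Oracle to_date literal.
func isDateTimeLiteral(s string) bool {
    return dateTimeReg.MatchString(s) || dateTimeIsoReg.MatchString(s)
}

func main() {
    fmt.Println(isDateTimeLiteral("2024-01-02 22:08:22"))               // true
    fmt.Println(isDateTimeLiteral("2024-01-02T22:08:22.275697+08:00")) // true
    fmt.Println(isDateTimeLiteral("12345"))                            // false
}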

View File

@@ -12,13 +12,17 @@ import (
)
func init() {
dbi.Register(dbi.DbTypeOracle, new(OraMeta))
dbi.Register(dbi.DbTypeOracle, new(Meta))
}
type OraMeta struct {
const (
DbVersionOracle11 dbi.DbVersion = "11"
)
type Meta struct {
}
func (md *OraMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
func (om *Meta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
err := d.IfUseSshTunnelChangeIpPort()
if err != nil {
return nil, err
@@ -75,13 +79,38 @@ func (md *OraMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
return nil, err
}
}
return conn, err
}
func (om *OraMeta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
func (om *Meta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
return &OracleDialect{dc: conn}
}
func (om *OraMeta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
func (om *Meta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
// 查询数据库版本信息,以做兼容性处理
if conn.Info.Version == "" && !conn.Info.DefaultVersion {
if conn.GetDb() != nil {
_, res, _ := conn.Query("select VERSION from v$instance")
if len(res) > 0 {
version := cast.ToString(res[0]["VERSION"])
// 11开头为11g版本
if strings.HasPrefix(version, "11") {
conn.Info.Version = DbVersionOracle11
conn.Info.DefaultVersion = false
} else {
conn.Info.DefaultVersion = true
}
}
}
}
if conn.Info.Version == DbVersionOracle11 {
md := &OracleMetaData11{}
md.dc = conn
md.version = DbVersionOracle11
return dbi.NewMetaDataX(md)
}
return dbi.NewMetaDataX(&OracleMetaData{dc: conn})
}
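GetMetaData now probes v$instance once, caches the outcome on the connection info (Version / DefaultVersion), and hands back an 11g-specific metadata handler when needed; the same value is exposed to clients through the new :dbId/version endpoint. A hedged sketch of the kind of branching this enables downstream (the strategy strings are illustrative):

package example

import "mayfly-go/internal/db/dbm/dbi"

// identityStrategy shows a caller branching on the compatible version
// returned by GetCompatibleDbVersion(); only the "11" value comes from this change.
func identityStrategy(version dbi.DbVersion) string {
    if version == dbi.DbVersion("11") { // Oracle 11g, detected from v$instance above
        return "sequence + BEFORE INSERT trigger"
    }
    return "IDENTITY column"
}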

View File

@@ -25,6 +25,12 @@ type OracleMetaData struct {
dbi.DefaultMetaData
dc *dbi.DbConn
version dbi.DbVersion
}
func (od *OracleMetaData11) GetCompatibleDbVersion() dbi.DbVersion {
return od.version
}
func (od *OracleMetaData) GetDbServer() (*dbi.DbServer, error) {
@@ -306,10 +312,19 @@ end`
if len(columnComments) > 0 {
sqlArr = append(sqlArr, columnComments...)
}
otherSql := od.GenerateTableOtherDDL(tableInfo, quoteTableName, columns)
if len(otherSql) > 0 {
sqlArr = append(sqlArr, otherSql...)
}
return sqlArr
}
// 11g及以下版本会设置自增序列
func (od *OracleMetaData) GenerateTableOtherDDL(tableInfo dbi.Table, quoteTableName string, columns []dbi.Column) []string {
return nil
}
// 获取建表ddl
func (od *OracleMetaData) GetTableDDL(tableName string, dropBeforeCreate bool) (string, error) {

View File

@@ -0,0 +1,109 @@
package oracle
import (
"fmt"
"mayfly-go/internal/db/dbm/dbi"
"mayfly-go/pkg/utils/collx"
"strings"
"github.com/may-fly/cast"
)
const (
ORACLE11_COLUMN_MA_KEY = "ORACLE11_COLUMN_MA"
)
type OracleMetaData11 struct {
OracleMetaData
}
// 获取列元信息, 如列名等
func (od *OracleMetaData11) GetColumns(tableNames ...string) ([]dbi.Column, error) {
meta := od.dc.GetMetaData()
tableName := strings.Join(collx.ArrayMap[string, string](tableNames, func(val string) string {
return fmt.Sprintf("'%s'", meta.RemoveQuote(val))
}), ",")
// 如果表数量超过了1000需要分批查询
if len(tableNames) > 1000 {
columns := make([]dbi.Column, 0)
for i := 0; i < len(tableNames); i += 1000 {
end := i + 1000
if end > len(tableNames) {
end = len(tableNames)
}
tables := tableNames[i:end]
cols, err := od.GetColumns(tables...)
if err != nil {
return nil, err
}
columns = append(columns, cols...)
}
return columns, nil
}
_, res, err := od.dc.Query(fmt.Sprintf(dbi.GetLocalSql(ORACLE_META_FILE, ORACLE11_COLUMN_MA_KEY), tableName))
if err != nil {
return nil, err
}
columnHelper := meta.GetColumnHelper()
columns := make([]dbi.Column, 0)
for _, re := range res {
column := dbi.Column{
TableName: cast.ToString(re["TABLE_NAME"]),
ColumnName: cast.ToString(re["COLUMN_NAME"]),
DataType: dbi.ColumnDataType(cast.ToString(re["DATA_TYPE"])),
CharMaxLength: cast.ToInt(re["CHAR_MAX_LENGTH"]),
ColumnComment: cast.ToString(re["COLUMN_COMMENT"]),
Nullable: cast.ToString(re["NULLABLE"]) == "YES",
IsPrimaryKey: cast.ToInt(re["IS_PRIMARY_KEY"]) == 1,
IsIdentity: cast.ToInt(re["IS_IDENTITY"]) == 1,
ColumnDefault: cast.ToString(re["COLUMN_DEFAULT"]),
NumPrecision: cast.ToInt(re["NUM_PRECISION"]),
NumScale: cast.ToInt(re["NUM_SCALE"]),
}
columnHelper.FixColumn(&column)
columns = append(columns, column)
}
return columns, nil
}
func (od *OracleMetaData11) genColumnBasicSql(column dbi.Column) string {
meta := od.dc.GetMetaData()
colName := meta.QuoteIdentifier(column.ColumnName)
if column.IsIdentity {
// 11g以前的版本 如果是自增自增列数据类型必须是number不需要设置默认值和空值建表后设置自增序列
return fmt.Sprintf(" %s NUMBER", colName)
}
nullAble := ""
if !column.Nullable {
nullAble = " NOT NULL"
}
defVal := ""
if column.ColumnDefault != "" {
defVal = fmt.Sprintf(" DEFAULT %v", column.ColumnDefault)
}
columnSql := fmt.Sprintf(" %s %s%s%s", colName, column.GetColumnType(), defVal, nullAble)
return columnSql
}
// 11g及以下版本会设置自增序列和触发器
func (od *OracleMetaData11) GenerateTableOtherDDL(tableInfo dbi.Table, quoteTableName string, columns []dbi.Column) []string {
result := make([]string, 0)
for _, col := range columns {
if col.IsIdentity {
seqName := fmt.Sprintf("%s_%s_seq", tableInfo.TableName, col.ColumnName)
trgName := fmt.Sprintf("%s_%s_trg", tableInfo.TableName, col.ColumnName)
result = append(result, fmt.Sprintf("CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1", seqName))
result = append(result, fmt.Sprintf("CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW WHEN (NEW.%s IS NULL) BEGIN SELECT %s.nextval INTO :new.%s FROM dual; END", trgName, quoteTableName, col.ColumnName, seqName, col.ColumnName))
}
}
return result
}
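Because Oracle 11g has no IDENTITY columns, GenerateTableOtherDDL above emits one sequence and one BEFORE INSERT trigger per identity column. For a hypothetical table T_USER with identity column ID, the generated statements look like this (reproduced with plain fmt calls; the real code quotes the table name via QuoteIdentifier):

package main

import "fmt"

func main() {
    table, col := "T_USER", "ID" // illustrative names
    seq := fmt.Sprintf("%s_%s_seq", table, col)
    trg := fmt.Sprintf("%s_%s_trg", table, col)

    fmt.Printf("CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1\n", seq)
    fmt.Printf("CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON \"%s\" FOR EACH ROW WHEN (NEW.%s IS NULL) BEGIN SELECT %s.nextval INTO :new.%s FROM dual; END\n",
        trg, table, col, seq, col)
}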

View File

@@ -15,22 +15,22 @@ import (
)
func init() {
meta := new(PostgresMeta)
meta := new(Meta)
dbi.Register(dbi.DbTypePostgres, meta)
dbi.Register(dbi.DbTypeKingbaseEs, meta)
dbi.Register(dbi.DbTypeVastbase, meta)
gauss := &PostgresMeta{
gauss := &Meta{
Param: "dbtype=gauss",
}
dbi.Register(dbi.DbTypeGauss, gauss)
}
type PostgresMeta struct {
type Meta struct {
Param string
}
func (pm *PostgresMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
func (pm *Meta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
driverName := "postgres"
// SSH Conect
if d.SshTunnelMachineId > 0 {
@@ -81,11 +81,11 @@ func (pm *PostgresMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
return sql.Open(driverName, dsn)
}
func (pm *PostgresMeta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
func (pm *Meta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
return &PgsqlDialect{dc: conn}
}
func (pm *PostgresMeta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
func (pm *Meta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
return dbi.NewMetaDataX(&PgsqlMetaData{dc: conn})
}

View File

@@ -8,13 +8,13 @@ import (
)
func init() {
dbi.Register(dbi.DbTypeSqlite, new(SqliteMeta))
dbi.Register(dbi.DbTypeSqlite, new(Meta))
}
type SqliteMeta struct {
type Meta struct {
}
func (md *SqliteMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
func (md *Meta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
// 用host字段来存sqlite的文件路径
// 检查文件是否存在,否则报错基于sqlite会自动创建文件为了服务器文件安全所以先确定文件存在再连接不自动创建
if _, err := os.Stat(d.Host); err != nil {
@@ -29,10 +29,10 @@ func (md *SqliteMeta) GetSqlDb(d *dbi.DbInfo) (*sql.DB, error) {
return db, err
}
func (sm *SqliteMeta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
func (sm *Meta) GetDialect(conn *dbi.DbConn) dbi.Dialect {
return &SqliteDialect{dc: conn}
}
func (sm *SqliteMeta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
func (sm *Meta) GetMetaData(conn *dbi.DbConn) *dbi.MetaDataX {
return dbi.NewMetaDataX(&SqliteMetaData{dc: conn})
}

View File

@@ -22,8 +22,9 @@ type DataSyncTask struct {
SrcTagPath string `orm:"column(src_tag_path)" json:"srcTagPath"`
DataSql string `orm:"column(data_sql)" json:"dataSql"` // 数据源查询sql
PageSize int `orm:"column(page_size)" json:"pageSize"` // 配置分页sql查询的条数
UpdField string `orm:"column(upd_field)" json:"updField"` //更新字段, 选择由哪个字段为更新字段查询数据源的时候会带上这个字段where update_time > {最近更新的最大值}
UpdField string `orm:"column(upd_field)" json:"updField"` // 更新字段, 选择由哪个字段为更新字段查询数据源的时候会带上这个字段where update_time > {最近更新的最大值}
UpdFieldVal string `orm:"column(upd_field_val)" json:"updFieldVal"` // 更新字段当前值
UpdFieldSrc string `orm:"column(upd_field_src)" json:"updFieldSrc"` // 更新值来源, 如select name as user_name from user; 则updFieldSrc的值为user_name
// 目标数据库信息
TargetDbId int64 `orm:"column(target_db_id)" json:"targetDbId"`

View File

@@ -7,9 +7,15 @@ import (
type DbTransferTask struct {
model.Model
RunningState DbTransferRunningState `orm:"column(running_state)" json:"runningState"` // 运行状态
LogId uint64 `json:"logId"`
TaskName string `orm:"column(task_name)" json:"taskName"` // 任务名称
RunningState int8 `orm:"column(running_state)" json:"runningState"` // 运行状态
LogId uint64 `json:"logId"`
TaskName string `orm:"column(task_name)" json:"taskName"` // 任务名称
Status int8 `orm:"column(status)" json:"status"` // 启用状态 1启用 -1禁用
CronAble int8 `orm:"column(cron_able)" json:"cronAble"` // 是否定时 1是 -1否
Cron string `orm:"column(cron)" json:"cron"` // 定时任务cron表达式
Mode int8 `orm:"column(mode)" json:"mode"` // 数据迁移方式1、迁移到数据库 2、迁移到文件
TargetFileDbType string `orm:"column(target_file_db_type)" json:"targetFileDbType"` // 目标文件数据库类型
TaskKey string `orm:"column(key)" json:"taskKey"` // 定时任务唯一uuid key
CheckedKeys string `orm:"column(checked_keys)" json:"checkedKeys"` // 选中需要迁移的表
DeleteTable int `orm:"column(delete_table)" json:"deleteTable"` // 创建表前是否删除表
@@ -34,14 +40,18 @@ func (d *DbTransferTask) TableName() string {
return "t_db_transfer_task"
}
type DbTransferRunningState int8
const (
DbTransferTaskStatusEnable int = 1 // 启用状态
DbTransferTaskStatusDisable int = -1 // 禁用状态
DbTransferTaskStatusEnable int8 = 1 // 启用状态
DbTransferTaskStatusDisable int8 = -1 // 禁用状态
DbTransferTaskRunStateSuccess DbTransferRunningState = 2 // 执行成功
DbTransferTaskRunStateRunning DbTransferRunningState = 1 // 运行中状态
DbTransferTaskRunStateFail DbTransferRunningState = -1 // 执行失败
DbTransferTaskRunStateStop DbTransferRunningState = -2 // 手动终止
DbTransferTaskCronAbleEnable int8 = 1 // 是否定时 1是
DbTransferTaskCronAbleDisable int8 = -1 // 是否定时 -1否
DbTransferTaskModeDb int8 = 1 // 数据迁移方式1、迁移到数据库
DbTransferTaskModeFile int8 = 2 // 数据迁移方式2、迁移到文件
DbTransferTaskRunStateSuccess int8 = 2 // 执行成功
DbTransferTaskRunStateRunning int8 = 1 // 运行中状态
DbTransferTaskRunStateFail int8 = -1 // 执行失败
DbTransferTaskRunStateStop int8 = -2 // 手动终止
)

View File

@@ -0,0 +1,28 @@
package entity
import (
"mayfly-go/pkg/model"
"time"
)
type DbTransferFile struct {
model.IdModel
IsDeleted int8 `orm:"column(is_deleted)" json:"-"` // 是否删除 1是 0否
CreateTime *time.Time `orm:"column(create_time)" json:"createTime"` // 创建时间,默认当前时间戳
Status int8 `orm:"column(status)" json:"status"` // 状态 1、执行中 2、执行成功 3、执行失败
TaskId uint64 `orm:"column(task_id)" json:"taskId"` // 迁移任务ID
LogId uint64 `orm:"column(log_id)" json:"logId"` // 日志ID
FileDbType string `orm:"column(file_db_type)" json:"fileDbType"` // sql文件数据库类型
FileName string `orm:"column(file_name)" json:"fileName"` // 显式文件名
FileUuid string `orm:"column(file_uuid)" json:"fileUuid"` // 文件真实id拼接后可以下载
}
func (d *DbTransferFile) TableName() string {
return "t_db_transfer_files"
}
const (
DbTransferFileStatusRunning int8 = 1
DbTransferFileStatusSuccess int8 = 2
DbTransferFileStatusFail int8 = -1
)

View File

@@ -20,7 +20,13 @@ type DataSyncLogQuery struct {
}
type DbTransferTaskQuery struct {
Name string `json:"name" form:"name"`
Name string `json:"name" form:"name"`
Status int8 `json:"status" form:"status"`
CronAble int8 `json:"cronAble" form:"cronAble"`
}
type DbTransferFileQuery struct {
TaskId uint64 `json:"task_id" form:"taskId"`
Name string `json:"name" form:"name"`
}
type DbTransferLogQuery struct {

View File

@@ -0,0 +1,14 @@
package repository
import (
"mayfly-go/internal/db/domain/entity"
"mayfly-go/pkg/base"
"mayfly-go/pkg/model"
)
type DbTransferFile interface {
base.Repo[*entity.DbTransferFile]
// 分页获取数据库实例信息列表
GetPageList(condition *entity.DbTransferFileQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error)
}

View File

@@ -18,7 +18,9 @@ func newDbTransferTaskRepo() repository.DbTransferTask {
// 分页获取数据库信息列表
func (d *dbTransferTaskRepoImpl) GetTaskList(condition *entity.DbTransferTaskQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error) {
qd := model.NewCond().
Like("task_name", condition.Name)
Like("task_name", condition.Name).
Eq("status", condition.Status).
Eq("cron_able", condition.CronAble)
//Eq("status", condition.Status)
return d.PageByCondToAny(qd, pageParam, toEntity)
}

View File

@@ -0,0 +1,25 @@
package persistence
import (
"mayfly-go/internal/db/domain/entity"
"mayfly-go/internal/db/domain/repository"
"mayfly-go/pkg/base"
"mayfly-go/pkg/model"
)
type dbTransferFileRepoImpl struct {
base.RepoImpl[*entity.DbTransferFile]
}
func newDbTransferFileRepo() repository.DbTransferFile {
return &dbTransferFileRepoImpl{base.RepoImpl[*entity.DbTransferFile]{M: new(entity.DbTransferFile)}}
}
// 分页获取数据库信息列表
func (d *dbTransferFileRepoImpl) GetPageList(condition *entity.DbTransferFileQuery, pageParam *model.PageParam, toEntity any, orderBy ...string) (*model.PageResult[any], error) {
qd := model.NewCond().
Eq("task_id", condition.TaskId).
OrderByDesc("create_time")
//Eq("status", condition.Status)
return d.PageByCondToAny(qd, pageParam, toEntity)
}

View File

@@ -12,6 +12,7 @@ func InitIoc() {
ioc.Register(newDataSyncTaskRepo(), ioc.WithComponentName("DbDataSyncTaskRepo"))
ioc.Register(newDataSyncLogRepo(), ioc.WithComponentName("DbDataSyncLogRepo"))
ioc.Register(newDbTransferTaskRepo(), ioc.WithComponentName("DbTransferTaskRepo"))
ioc.Register(newDbTransferFileRepo(), ioc.WithComponentName("DbTransferFileRepo"))
ioc.Register(NewDbBackupRepo(), ioc.WithComponentName("DbBackupRepo"))
ioc.Register(NewDbBackupHistoryRepo(), ioc.WithComponentName("DbBackupHistoryRepo"))

View File

@@ -30,6 +30,8 @@ func InitDbRouter(router *gin.RouterGroup) {
req.NewGet(":dbId/t-create-ddl", d.GetTableDDL),
req.NewGet(":dbId/version", d.GetVersion),
req.NewGet(":dbId/pg/schemas", d.GetSchemas),
req.NewPost(":dbId/exec-sql", d.ExecSql).Log(req.NewLog("db-执行Sql")),

View File

@@ -20,16 +20,32 @@ func InitDbTransferRouter(router *gin.RouterGroup) {
req.NewGet("", d.Tasks),
// 保存任务 /datasync/save
req.NewPost("save", d.SaveTask).Log(req.NewLogSave("datasync-保存数据迁移任务信息")).RequiredPermissionCode("db:transfer:save"),
req.NewPost("save", d.SaveTask).Log(req.NewLogSave("dts-保存数据迁移任务信息")).RequiredPermissionCode("db:transfer:save"),
// 删除任务 /datasync/:taskId/del
req.NewDelete(":taskId/del", d.DeleteTask).Log(req.NewLogSave("datasync-删除数据迁移任务信息")).RequiredPermissionCode("db:transfer:del"),
req.NewDelete(":taskId/del", d.DeleteTask).Log(req.NewLogSave("dts-删除数据迁移任务信息")).RequiredPermissionCode("db:transfer:del"),
// 启停用任务 /datasync/status
req.NewPost(":taskId/status", d.ChangeStatus).Log(req.NewLogSave("dts-启停任务")).RequiredPermissionCode("db:transfer:status"),
// 立即执行任务 /datasync/run
req.NewPost(":taskId/run", d.Run).Log(req.NewLog("DBMS-执行数据迁移任务")).RequiredPermissionCode("db:transfer:run"),
req.NewPost(":taskId/run", d.Run).Log(req.NewLog("dts-执行数据迁移任务")).RequiredPermissionCode("db:transfer:run"),
// 停止正在执行中的任务
req.NewPost(":taskId/stop", d.Stop).Log(req.NewLogSave("DBMS-终止数据迁移任务")),
req.NewPost(":taskId/stop", d.Stop).Log(req.NewLogSave("dts-终止数据迁移任务")).RequiredPermissionCode("db:transfer:run"),
// 导出文件管理-列表
req.NewGet("/files/:taskId", d.Files),
req.NewPost("/files/rename", d.FileRename).Log(req.NewLogSave("dts-删除迁移文件")).RequiredPermissionCode("db:transfer:files:rename"),
// 导出文件管理-删除
req.NewPost("/files/del/:fileId", d.FileDel).Log(req.NewLogSave("dts-删除迁移文件")).RequiredPermissionCode("db:transfer:files:del"),
req.NewPost("/files/run", d.FileRun).Log(req.NewLogSave("dts-执行sql文件")).RequiredPermissionCode("db:transfer:files:run"),
// 导出文件管理-下载
req.NewGet("/files/down/:fileUuid", d.FileDown).Log(req.NewLogSave("dts-下载迁移文件")).RequiredPermissionCode("db:transfer:files:down"),
}
req.BatchSetGroup(instances, reqs[:])

View File

@@ -7,6 +7,7 @@ import "mayfly-go/pkg/utils/anyx"
const SuccessSysMsgType = 1
const ErrorSysMsgType = 0
const InfoSysMsgType = 2
const InfoTypeSqlExecProgress = 22
// websocket消息
type SysMsg struct {
@@ -42,6 +43,9 @@ func (sm *SysMsg) WithClientId(clientId string) *SysMsg {
func InfoSysMsg(title string, msg any) *SysMsg {
return &SysMsg{Type: InfoSysMsgType, Title: title, Msg: anyx.ToString(msg)}
}
func InfoSqlProgressMsg(title string, msg any) *SysMsg {
return &SysMsg{Type: InfoTypeSqlExecProgress, Title: title, Msg: anyx.ToString(msg)}
}
// 成功消息
func SuccessSysMsg(title string, msg any) *SysMsg {

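A rough sketch of how the new progress type might be used by a long-running SQL execution to notify the frontend. It only builds the message with the helpers added above; the package name shown (ws), the payload shape and how the message is actually pushed to the websocket client are assumptions, since the send API is not part of this diff.

package ws // assumed package of SysMsg

// buildSqlProgressMsg packages execution progress for one websocket client.
// The executed/total payload shape is illustrative only.
func buildSqlProgressMsg(clientId string, executed, total int) *SysMsg {
	payload := map[string]int{"executed": executed, "total": total}
	return InfoSqlProgressMsg("sql执行进度", payload).WithClientId(clientId)
}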
View File

@@ -0,0 +1,51 @@
package writer
import (
"io"
"os"
"path/filepath"
)
type FileWriter struct {
tryFlushCount int
writer *os.File
aborted bool
}
func NewFileWriter(filePath string) *FileWriter {
if filePath == "" {
panic("filePath is empty")
}
// 使用filepath.Dir函数提取文件夹路径
dir := filepath.Dir(filePath)
if dir != "" {
// 检查文件夹路径,不存在则创建
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, os.ModePerm)
if err != nil {
panic(err)
}
}
}
fw, err := os.Create(filePath)
if err != nil {
panic(err)
}
return &FileWriter{writer: fw}
}
func (f *FileWriter) Close() {
f.writer.Close()
}
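
// TryFlush is intentionally a no-op: FileWriter writes directly to the underlying *os.File and needs no explicit flush.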
func (f *FileWriter) TryFlush() {
}
func (f *FileWriter) Write(b []byte) (n int, err error) {
return f.writer.Write(b)
}
func (f *FileWriter) WriteString(data string) {
io.WriteString(f.writer, data)
}
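
A minimal usage sketch of the new writer, assuming FileWriter lives in the same mayfly-go/pkg/utils/writer package as GzipWriter; the output path and SQL statement below are placeholders.

package main

import "mayfly-go/pkg/utils/writer"

func main() {
	// NewFileWriter creates missing parent directories and panics if the file cannot be created.
	fw := writer.NewFileWriter("./db/transfer/demo.sql") // placeholder path
	defer fw.Close()
	fw.WriteString("CREATE TABLE demo (id BIGINT PRIMARY KEY);\n")
}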

View File

@@ -1,4 +1,4 @@
package application
package writer
import (
"compress/gzip"
@@ -6,17 +6,17 @@ import (
"mayfly-go/pkg/biz"
)
type gzipWriter struct {
type GzipWriter struct {
tryFlushCount int
writer *gzip.Writer
aborted bool
}
func newGzipWriter(writer io.Writer) *gzipWriter {
return &gzipWriter{writer: gzip.NewWriter(writer)}
func NewGzipWriter(writer io.Writer) *GzipWriter {
return &GzipWriter{writer: gzip.NewWriter(writer)}
}
func (g *gzipWriter) WriteString(data string) {
func (g *GzipWriter) WriteString(data string) {
if g.aborted {
return
}
@@ -26,7 +26,7 @@ func (g *gzipWriter) WriteString(data string) {
}
}
func (g *gzipWriter) Write(p []byte) (n int, err error) {
func (g *GzipWriter) Write(p []byte) (n int, err error) {
if g.aborted {
return
}
@@ -38,11 +38,11 @@ func (g *gzipWriter) Write(p []byte) (n int, err error) {
return
}
func (g *gzipWriter) Close() {
func (g *GzipWriter) Close() {
g.writer.Close()
}
func (g *gzipWriter) TryFlush() {
func (g *GzipWriter) TryFlush() {
if g.tryFlushCount%1000 == 0 {
g.writer.Flush()
}

View File

@@ -0,0 +1,10 @@
package writer
import "io"
type CustomWriter interface {
io.Writer
WriteString(data string)
Close()
TryFlush()
}
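
Both GzipWriter and FileWriter satisfy this interface, which lets the dump/transfer code stay agnostic about whether SQL is streamed back over HTTP or written to a local file. A hedged sketch of that selection, written as if it lived in the same writer package; the boolean flag and parameter names are placeholders, not names from this commit.

package writer

import "io"

// newTransferWriter picks the output sink for a migration run:
// gzip-compress onto an HTTP response writer, or write a plain SQL file.
func newTransferWriter(toFile bool, httpOut io.Writer, filePath string) CustomWriter {
	if toFile {
		return NewFileWriter(filePath)
	}
	return NewGzipWriter(httpOut)
}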

View File

@@ -64,6 +64,13 @@ CREATE TABLE `t_db_transfer_task` (
`update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
`is_deleted` tinyint(1) DEFAULT '0' COMMENT '是否删除',
`task_name` varchar(100) NULL comment '任务名',
`cron_able` TINYINT(3) NOT NULL DEFAULT 0 comment '是否定时 1是 -1否',
`cron` VARCHAR(20) NULL comment '定时任务cron表达式',
`task_key` varchar(100) NULL comment '定时任务唯一uuid key',
`mode` TINYINT(3) NOT NULL DEFAULT 1 comment '数据迁移方式:1、迁移到数据库 2、迁移到文件',
`target_file_db_type` varchar(200) NULL comment '目标文件语言类型,类型枚举同target_db_type',
`status` tinyint(3) NOT NULL DEFAULT '1' comment '启用状态 1启用 -1禁用',
`upd_field_src` varchar(100) DEFAULT NULL COMMENT '更新值来源字段,默认同更新字段,如果查询结果指定了字段别名且与原更新字段不一致,则取这个字段值为当前更新值',
`delete_time` datetime DEFAULT NULL COMMENT '删除时间',
`checked_keys` text NOT NULL COMMENT '选中需要迁移的表',
`delete_table` tinyint(4) NOT NULL COMMENT '创建表前是否删除表 1是 -1否',
@@ -84,6 +91,21 @@ CREATE TABLE `t_db_transfer_task` (
PRIMARY KEY (`id`)
) COMMENT='数据库迁移任务表';
DROP TABLE IF EXISTS `t_db_transfer_files`;
CREATE TABLE `t_db_transfer_files` (
`id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`is_deleted` tinyint(3) NOT NULL DEFAULT 0 COMMENT '是否删除',
`delete_time` datetime COMMENT '删除时间',
`status` tinyint(3) NOT NULL DEFAULT 1 COMMENT '状态1、执行中 2、执行失败 3、 执行成功',
`task_id` bigint COMMENT '迁移任务ID',
`log_id` bigint COMMENT '日志ID',
`file_db_type` varchar(200) COMMENT 'sql文件数据库类型',
`file_name` varchar(200) COMMENT '显示文件名 默认: 年月日时分秒.zip',
`file_uuid` varchar(50) COMMENT '文件真实uuid拼接后可以下载',
PRIMARY KEY (id)
) COMMENT '数据库迁移文件管理';
-- ----------------------------
-- Table structure for t_db_sql
-- ----------------------------
@@ -647,7 +669,7 @@ INSERT INTO `t_sys_config` (name, `key`, params, value, remark, permission, crea
INSERT INTO `t_sys_config` (name, `key`, params, value, remark, permission, create_time, creator_id, creator, update_time, modifier_id, modifier, is_deleted, delete_time) VALUES('ldap登录配置', 'LdapLogin', '[{"name":"是否启用","model":"enable","placeholder":"是否启用","options":"true,false"},{"name":"host","model":"host","placeholder":"host"},{"name":"port","model":"port","placeholder":"port"},{"name":"bindDN","model":"bindDN","placeholder":"LDAP 服务的管理员账号,如: \\"cn=admin,dc=example,dc=com\\""},{"name":"bindPwd","model":"bindPwd","placeholder":"LDAP 服务的管理员密码"},{"name":"baseDN","model":"baseDN","placeholder":"用户所在的 base DN, 如: \\"ou=users,dc=example,dc=com\\""},{"name":"userFilter","model":"userFilter","placeholder":"过滤用户的方式, 如: \\"(uid=%s)、(&(objectClass=organizationalPerson)(uid=%s))\\""},{"name":"uidMap","model":"uidMap","placeholder":"用户id和 LDAP 字段名之间的映射关系,如: cn"},{"name":"udnMap","model":"udnMap","placeholder":"用户姓名(dispalyName)和 LDAP 字段名之间的映射关系,如: displayName"},{"name":"emailMap","model":"emailMap","placeholder":"用户email和 LDAP 字段名之间的映射关系"},{"name":"skipTLSVerify","model":"skipTLSVerify","placeholder":"客户端是否跳过 TLS 证书验证","options":"true,false"},{"name":"安全协议","model":"securityProtocol","placeholder":"安全协议为Null不使用安全协议如: StartTLS, LDAPS","options":"Null,StartTLS,LDAPS"}]', '', 'ldap登录相关配置', 'admin,', '2023-08-25 21:47:20', 1, 'admin', '2023-08-25 22:56:07', 1, 'admin', 0, NULL);
INSERT INTO `t_sys_config` (`name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES('系统全局样式设置', 'SysStyleConfig', '[{"model":"logoIcon","name":"logo图标","placeholder":"系统logo图标base64编码, 建议svg格式不超过10k","required":false},{"model":"title","name":"菜单栏标题","placeholder":"系统菜单栏标题展示","required":false},{"model":"viceTitle","name":"登录页标题","placeholder":"登录页标题展示","required":false},{"model":"useWatermark","name":"是否启用水印","placeholder":"是否启用系统水印","options":"true,false","required":false},{"model":"watermarkContent","name":"水印补充信息","placeholder":"额外水印信息","required":false}]', '{"title":"mayfly-go","viceTitle":"mayfly-go","logoIcon":"","useWatermark":"true","watermarkContent":""}', '系统icon、标题、水印信息等配置', 'all', '2024-01-04 15:17:18', 1, 'admin', '2024-01-05 09:40:44', 1, 'admin', 0, NULL);
INSERT INTO t_sys_config ( name, `key`, params, value, remark, permission, create_time, creator_id, creator, update_time, modifier_id, modifier, is_deleted, delete_time) VALUES('机器相关配置', 'MachineConfig', '[{"name":"终端回放存储路径","model":"terminalRecPath","placeholder":"终端回放存储路径"},{"name":"uploadMaxFileSize","model":"uploadMaxFileSize","placeholder":"允许上传的最大文件大小(1MB、2GB等)"},{"model":"termOpSaveDays","name":"终端记录保存时间","placeholder":"终端记录保存时间(单位天)"},{"model":"guacdHost","name":"guacd服务ip","placeholder":"guacd服务ip默认 127.0.0.1","required":false},{"name":"guacd服务端口","model":"guacdPort","placeholder":"guacd服务端口默认 4822","required":false},{"model":"guacdFilePath","name":"guacd服务文件存储位置","placeholder":"guacd服务文件存储位置用于挂载RDP文件夹"},{"name":"guacd服务记录存储位置","model":"guacdRecPath","placeholder":"guacd服务记录存储位置用于记录rdp操作记录"}]', '{"terminalRecPath":"./rec","uploadMaxFileSize":"1000MB","termOpSaveDays":"30","guacdHost":"","guacdPort":"","guacdFilePath":"./guacd/rdp-file","guacdRecPath":"./guacd/rdp-rec"}', '机器相关配置,如终端回放路径等', 'all', '2023-07-13 16:26:44', 1, 'admin', '2024-04-06 12:25:03', 1, 'admin', 0, NULL);
INSERT INTO `t_sys_config` (`name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES('数据库备份恢复', 'DbBackupRestore', '[{"model":"backupPath","name":"备份路径","placeholder":"备份文件存储路径"}]', '{"backupPath":"./db/backup"}', '', 'admin,', '2023-12-29 09:55:26', 1, 'admin', '2023-12-29 15:45:24', 1, 'admin', 0, NULL);
INSERT INTO `t_sys_config` (`id`, `name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES(10, '数据库备份恢复', 'DbBackupRestore', '[{"model":"backupPath","name":"备份路径","placeholder":"备份文件存储路径"},{"model":"transferPath","name":"迁移路径","placeholder":"数据库迁移文件存储路径"}]', '{"backupPath":"./db/backup","transferPath":"./db/transfer"}', '数据库备份恢复', 'all', '2023-12-29 09:55:26', 1, 'admin', '2024-08-27 15:22:22', 12, 'liuzongyang', 0, NULL);
INSERT INTO `t_sys_config` (`name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES('Mysql可执行文件', 'MysqlBin', '[{"model":"path","name":"路径","placeholder":"可执行文件路径","required":true},{"model":"mysql","name":"mysql","placeholder":"mysql命令路径(空则为 路径/mysql)","required":false},{"model":"mysqldump","name":"mysqldump","placeholder":"mysqldump命令路径(空则为 路径/mysqldump)","required":false},{"model":"mysqlbinlog","name":"mysqlbinlog","placeholder":"mysqlbinlog命令路径(空则为 路径/mysqlbinlog)","required":false}]', '{"mysql":"","mysqldump":"","mysqlbinlog":"","path":"./db/mysql/bin"}', '', 'admin,', '2023-12-29 10:01:33', 1, 'admin', '2023-12-29 13:34:40', 1, 'admin', 0, NULL);
INSERT INTO `t_sys_config` (`name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES('MariaDB可执行文件', 'MariadbBin', '[{"model":"path","name":"路径","placeholder":"可执行文件路径","required":true},{"model":"mysql","name":"mysql","placeholder":"mysql命令路径(空则为 路径/mysql)","required":false},{"model":"mysqldump","name":"mysqldump","placeholder":"mysqldump命令路径(空则为 路径/mysqldump)","required":false},{"model":"mysqlbinlog","name":"mysqlbinlog","placeholder":"mysqlbinlog命令路径(空则为 路径/mysqlbinlog)","required":false}]', '{"mysql":"","mysqldump":"","mysqlbinlog":"","path":"./db/mariadb/bin"}', '', 'admin,', '2023-12-29 10:01:33', 1, 'admin', '2023-12-29 13:34:40', 1, 'admin', 0, NULL);
INSERT INTO `t_sys_config` (`name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES('DBMS配置', 'DbmsConfig', '[{"model":"querySqlSave","name":"记录查询sql","placeholder":"是否记录查询类sql","options":"true,false"},{"model":"maxResultSet","name":"最大结果集","placeholder":"允许sql查询的最大结果集数。注: 0=不限制","options":""},{"model":"sqlExecTl","name":"sql执行时间限制","placeholder":"超过该时间(单位:秒),执行将被取消"}]', '{"querySqlSave":"false","maxResultSet":"0","sqlExecTl":"60"}', 'DBMS相关配置', 'admin,', '2024-03-06 13:30:51', 1, 'admin', '2024-03-06 14:07:16', 1, 'admin', 0, NULL);
@@ -831,6 +853,10 @@ INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `we
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1709196723, 1709194669, 2, 1, '启停', 'db:transfer:status', 1709196723, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-02-29 16:52:04', '2024-02-29 16:52:04', 'SmLcpu6c/hGiLN1VT/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1709196737, 1709194669, 2, 1, '日志', 'db:transfer:log', 1709196737, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-02-29 16:52:17', '2024-02-29 16:52:17', 'SmLcpu6c/CZhNIbWg/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1709196755, 1709194669, 2, 1, '运行', 'db:transfer:run', 1709196755, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-02-29 16:52:36', '2024-02-29 16:52:36', 'SmLcpu6c/b6yHt6V2/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724376022, 1709194669, 2, 1, '文件-删除', 'db:transfer:files:del', 1724376022, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-23 09:20:23', '2024-08-23 14:50:21', 'SmLcpu6c/HIURtJJA/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724395850, 1709194669, 2, 1, '文件-下载', 'db:transfer:files:down', 1724395850, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-23 14:50:51', '2024-08-23 14:50:51', 'SmLcpu6c/FmqK4azt/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724398262, 1709194669, 2, 1, '文件', 'db:transfer:files', 1724376021, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-23 15:31:02', '2024-08-23 15:31:16', 'SmLcpu6c/btVtrbhk/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724817775, 1709194669, 2, 1, '文件-重命名', 'db:transfer:files:rename', 1724376021, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-28 12:02:56', '2024-08-28 12:03:01', 'SmLcpu6c/zu4fvnuA/', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(1714032002, 1713875842, '12sSjal1/UnWIUhW0/0tJwC3Gf/', 2, 1, '命令配置-删除', 'cmdconf:del', 1714032002, 'null', 1, 'admin', 1, 'admin', '2024-04-25 16:00:02', '2024-04-25 16:00:02', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(1714031981, 1713875842, '12sSjal1/UnWIUhW0/tEzIKecl/', 2, 1, '命令配置-保存', 'cmdconf:save', 1714031981, 'null', 1, 'admin', 1, 'admin', '2024-04-25 15:59:41', '2024-04-25 15:59:41', 0, NULL);
INSERT INTO t_sys_resource (id, pid, ui_path, `type`, status, name, code, weight, meta, creator_id, creator, modifier_id, modifier, create_time, update_time, is_deleted, delete_time) VALUES(1713875842, 2, '12sSjal1/UnWIUhW0/', 1, 1, '安全配置', 'security', 1713875842, '{"component":"ops/machine/security/SecurityConfList","icon":"Setting","isKeepAlive":true,"routeName":"SecurityConfList"}', 1, 'admin', 1, 'admin', '2024-04-23 20:37:22', '2024-04-23 20:37:22', 0, NULL);

View File

@@ -1,5 +1,47 @@
-- 数据同步新增字段
ALTER TABLE `t_db_data_sync_task`
ADD COLUMN `upd_field_src` varchar(100) DEFAULT NULL COMMENT '更新值来源字段,默认同更新字段,如果查询结果指定了字段别名且与原更新字段不一致,则取这个字段值为当前更新值';
-- 新增数据库迁移到文件的菜单资源
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724376022, 1709194669, 2, 1, '文件-删除', 'db:transfer:files:del', 1724376022, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-23 09:20:23', '2024-08-23 14:50:21', 'SmLcpu6c/HIURtJJA/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724395850, 1709194669, 2, 1, '文件-下载', 'db:transfer:files:down', 1724395850, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-23 14:50:51', '2024-08-23 14:50:51', 'SmLcpu6c/FmqK4azt/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724398262, 1709194669, 2, 1, '文件', 'db:transfer:files', 1724376021, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-23 15:31:02', '2024-08-23 15:31:16', 'SmLcpu6c/btVtrbhk/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724817775, 1709194669, 2, 1, '文件-重命名', 'db:transfer:files:rename', 1724376021, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-28 12:02:56', '2024-08-28 12:03:01', 'SmLcpu6c/zu4fvnuA/', 0, NULL);
INSERT INTO `t_sys_resource` (`id`, `pid`, `type`, `status`, `name`, `code`, `weight`, `meta`, `creator_id`, `creator`, `modifier_id`, `modifier`, `create_time`, `update_time`, `ui_path`, `is_deleted`, `delete_time`) VALUES(1724998419, 1709194669, 2, 1, '文件-执行', 'db:transfer:files:run', 1724998419, 'null', 12, 'liuzongyang', 12, 'liuzongyang', '2024-08-30 14:13:39', '2024-08-30 14:13:39', 'SmLcpu6c/qINungml/', 0, NULL);
-- 新增数据库迁移相关的系统配置
DELETE FROM `t_sys_config` WHERE `key` = 'DbBackupRestore';
INSERT INTO `t_sys_config` (`id`, `name`, `key`, `params`, `value`, `remark`, `permission`, `create_time`, `creator_id`, `creator`, `update_time`, `modifier_id`, `modifier`, `is_deleted`, `delete_time`) VALUES(10, '数据库备份恢复', 'DbBackupRestore', '[{"model":"backupPath","name":"备份路径","placeholder":"备份文件存储路径"},{"model":"transferPath","name":"迁移路径","placeholder":"数据库迁移文件存储路径"}]', '{"backupPath":"./db/backup","transferPath":"./db/transfer"}', '数据库备份恢复', 'all', '2023-12-29 09:55:26', 1, 'admin', '2024-08-27 15:22:22', 12, 'liuzongyang', 0, NULL);
-- 数据库迁移到文件
ALTER TABLE `t_db_transfer_task`
ADD COLUMN `task_name` varchar(100) NULL comment '任务名' after `delete_time`;
ADD COLUMN `task_name` varchar(100) NULL comment '任务名',
ADD COLUMN `cron_able` TINYINT(3) NOT NULL DEFAULT 0 comment '是否定时 1是 -1否',
ADD COLUMN `cron` VARCHAR(20) NULL comment '定时任务cron表达式',
ADD COLUMN `task_key` varchar(100) NULL comment '定时任务唯一uuid key',
ADD COLUMN `mode` TINYINT(3) NOT NULL DEFAULT 1 comment '数据迁移方式:1、迁移到数据库 2、迁移到文件',
ADD COLUMN `target_file_db_type` varchar(200) NULL comment '目标文件语言类型,类型枚举同target_db_type',
ADD COLUMN `status` tinyint(3) NOT NULL DEFAULT '1' comment '启用状态 1启用 -1禁用';
UPDATE `t_db_transfer_task` SET mode = 1 WHERE 1=1;
UPDATE `t_db_transfer_task` SET cron_able = -1 WHERE 1=1;
UPDATE `t_db_transfer_task` SET task_name = '未命名' WHERE task_name = '' or task_name is null;
CREATE TABLE `t_db_transfer_files` (
`id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键ID',
`create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`is_deleted` tinyint(3) NOT NULL DEFAULT 0 COMMENT '是否删除',
`delete_time` datetime COMMENT '删除时间',
`status` tinyint(3) NOT NULL DEFAULT 1 COMMENT '状态1、执行中 2、执行失败 3、 执行成功',
`task_id` bigint COMMENT '迁移任务ID',
`log_id` bigint COMMENT '日志ID',
`file_db_type` varchar(200) COMMENT 'sql文件数据库类型',
`file_name` varchar(200) COMMENT '显示文件名 默认: 年月日时分秒.zip',
`file_uuid` varchar(50) COMMENT '文件真实uuid拼接后可以下载',
PRIMARY KEY (id)
) COMMENT '数据库迁移文件管理';
ALTER TABLE `t_flow_procdef`
ADD COLUMN `condition` text NULL comment '触发审批的条件计算结果返回1则需要启用该流程';