!84 fix: resolve database backup and restore issues

* refactor dbScheduler
* fix: search teams by team name
* feat: support selecting all databases when creating a database resource
* refactor dbScheduler
* fix: resolve database backup and restore issues
kanzihuang
2024-01-17 08:37:22 +00:00
committed by Coder慌
parent cc3981d99c
commit 94da6df33e
35 changed files with 846 additions and 609 deletions

View File

@@ -22,10 +22,10 @@ type Repo[T model.ModelI] interface {
InsertWithDb(ctx context.Context, db *gorm.DB, e T) error
// Batch-insert entities
-BatchInsert(ctx context.Context, models any) error
+BatchInsert(ctx context.Context, models []T) error
// Execute with the given gorm db; mainly used for transactions
-BatchInsertWithDb(ctx context.Context, db *gorm.DB, models any) error
+BatchInsertWithDb(ctx context.Context, db *gorm.DB, models []T) error
// Update the entity's fields by its id
UpdateById(ctx context.Context, e T, columns ...string) error
@@ -93,19 +93,19 @@ func (br *RepoImpl[T]) InsertWithDb(ctx context.Context, db *gorm.DB, e T) error
return gormx.InsertWithDb(db, br.fillBaseInfo(ctx, e))
}
-func (br *RepoImpl[T]) BatchInsert(ctx context.Context, es any) error {
+func (br *RepoImpl[T]) BatchInsert(ctx context.Context, es []T) error {
if db := contextx.GetDb(ctx); db != nil {
return br.BatchInsertWithDb(ctx, db, es)
}
-for _, e := range es.([]T) {
+for _, e := range es {
br.fillBaseInfo(ctx, e)
}
return gormx.BatchInsert[T](es)
}
// Execute with the given gorm db; mainly used for transactions
-func (br *RepoImpl[T]) BatchInsertWithDb(ctx context.Context, db *gorm.DB, es any) error {
-for _, e := range es.([]T) {
+func (br *RepoImpl[T]) BatchInsertWithDb(ctx context.Context, db *gorm.DB, es []T) error {
+for _, e := range es {
br.fillBaseInfo(ctx, e)
}
return gormx.BatchInsertWithDb[T](db, es)
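
The `any` → `[]T` change moves the element-type check from runtime to compile time: the old signature accepted any value and only failed once the `es.([]T)` assertion ran. A minimal standalone sketch of the difference (illustrative, not code from this repo):

```go
package main

import "fmt"

type User struct{ Name string }

// Old shape: accepts anything and asserts at runtime.
func batchInsertAny[T any](models any) error {
	for _, m := range models.([]T) { // panics if models is not exactly []T
		fmt.Println(m)
	}
	return nil
}

// New shape: the slice element type is verified by the compiler.
func batchInsert[T any](models []T) error {
	for _, m := range models {
		fmt.Println(m)
	}
	return nil
}

func main() {
	users := []*User{{Name: "a"}}
	_ = batchInsert(users) // ok: T is inferred as *User
	// _ = batchInsertAny[User](users) // compiles, but would panic: []*User is not []User
}
```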

View File

@@ -135,13 +135,13 @@ func InsertWithDb(db *gorm.DB, model any) error {
}
// Batch insert
-func BatchInsert[T any](models any) error {
+func BatchInsert[T any](models []T) error {
return BatchInsertWithDb[T](global.Db, models)
}
// Batch insert
-func BatchInsertWithDb[T any](db *gorm.DB, models any) error {
-return db.CreateInBatches(models, len(models.([]T))).Error
+func BatchInsertWithDb[T any](db *gorm.DB, models []T) error {
+return db.CreateInBatches(models, len(models)).Error
}
// Update the model by id; only fields with non-zero values in the model are updated (e.g. int fields != 0, pointer fields != nil)
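
gorm's `CreateInBatches(value, batchSize)` splits the slice into INSERT statements of at most `batchSize` rows, so passing `len(models)` writes the whole slice in one statement, preserving the previous behavior; the change only drops the runtime assertion. A small self-contained usage sketch (the `Tag` model and the in-memory SQLite driver are assumptions for illustration, not from this repo):

```go
package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Tag struct {
	ID   uint
	Name string
}

func main() {
	// In-memory SQLite keeps the sketch self-contained.
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Tag{}); err != nil {
		panic(err)
	}

	tags := []Tag{{Name: "a"}, {Name: "b"}, {Name: "c"}}
	// batchSize == len(tags): one INSERT covering the whole slice,
	// matching the patched BatchInsertWithDb above.
	if err := db.CreateInBatches(tags, len(tags)).Error; err != nil {
		panic(err)
	}
}
```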

View File

@@ -25,6 +25,35 @@ type Delayable interface {
GetKey() string
}
+var _ Delayable = (*wrapper[Job])(nil)
+type wrapper[T Job] struct {
+key string
+deadline time.Time
+removed bool
+status JobStatus
+job T
+}
+func newWrapper[T Job](job T) *wrapper[T] {
+return &wrapper[T]{
+key: job.GetKey(),
+job: job,
+}
+}
+func (d *wrapper[T]) GetDeadline() time.Time {
+return d.deadline
+}
+func (d *wrapper[T]) GetKey() string {
+return d.key
+}
+func (d *wrapper[T]) Payload() T {
+return d.job
+}
func NewDelayQueue[T Delayable](cap int) *DelayQueue[T] {
singleDequeue := make(chan struct{}, 1)
singleDequeue <- struct{}{}
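
The new `wrapper` moves all scheduling state (deadline, status, removed flag) off the job: a `Job` implementation now only carries domain data, and it is the wrapper, not the job, that satisfies `Delayable` and gets ordered by the `DelayQueue`. A sketch of that split under those assumptions (the `backupJob` type is made up):

```go
package runner

import "time"

// backupJob is a made-up job type: it carries only domain data, while the
// runner's wrapper tracks deadline, status, and the removed flag for it.
type backupJob struct{ key string }

func (b *backupJob) GetKey() JobKey { return b.key }
func (b *backupJob) Update(_ Job)   {}

func exampleWrap() Delayable {
	w := newWrapper[Job](&backupJob{key: "backup-db-1"})
	w.deadline = time.Now().Add(time.Minute)
	// The wrapper, not the job, is what the DelayQueue orders by deadline;
	// the original job stays reachable through Payload().
	_ = w.Payload()
	return w
}
```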

View File

@@ -1,11 +1,5 @@
package runner
-//var (
-//	ErrFull = errors.New("queue: queue is full")
-//	ErrEmpty = errors.New("queue: queue is empty")
-//	ErrNotFound = errors.New("queue: element not found")
-//)
// PriorityQueue is a priority queue backed by a min-heap ("small-top heap")
// When capacity <= 0 the queue is unbounded; the backing slice grows and shrinks dynamically
// When capacity > 0 the queue is bounded; its capacity is fixed at initialization and never grows or shrinks
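
The PriorityQueue implementation itself is not part of this diff; as a rough standalone illustration of the documented contract (min-heap ordering, unbounded when capacity <= 0, fixed capacity otherwise), a bounded push over Go's `container/heap` might look like this (all names hypothetical):

```go
package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a minimal min-heap ("small-top heap") of ints.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x any)        { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// boundedPush enforces the documented contract: capacity <= 0 means
// unbounded; capacity > 0 means the push is rejected once full.
func boundedPush(h *intHeap, capacity, v int) bool {
	if capacity > 0 && h.Len() >= capacity {
		return false
	}
	heap.Push(h, v)
	return true
}

func main() {
	h := &intHeap{}
	heap.Init(h)
	for _, v := range []int{5, 1, 3} {
		boundedPush(h, 2, v) // capacity 2: the third push is rejected
	}
	for h.Len() > 0 {
		fmt.Println(heap.Pop(h)) // pops in ascending order: 1, 5
	}
}
```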

View File

@@ -2,43 +2,43 @@ package runner
import (
"context"
"errors"
"fmt"
"github.com/emirpasic/gods/maps/linkedhashmap"
"mayfly-go/pkg/logx"
"mayfly-go/pkg/utils/timex"
"sync"
"time"
)
+var (
+ErrNotFound = errors.New("job not found")
+ErrExist = errors.New("job already exists")
+ErrFinished = errors.New("job already finished")
+)
type JobKey = string
-type RunFunc func(ctx context.Context)
-type NextFunc func() (Job, bool)
-type RunnableFunc func(next NextFunc) bool
+type RunJobFunc[T Job] func(ctx context.Context, job T)
+type NextJobFunc[T Job] func() (T, bool)
+type RunnableJobFunc[T Job] func(job T, next NextJobFunc[T]) bool
+type ScheduleJobFunc[T Job] func(job T) (deadline time.Time, err error)
type JobStatus int
const (
JobUnknown JobStatus = iota
-JobDelay
+JobDelaying
JobWaiting
JobRunning
-JobRemoved
)
type Job interface {
GetKey() JobKey
-GetStatus() JobStatus
-SetStatus(status JobStatus)
-Run(ctx context.Context)
-Runnable(next NextFunc) bool
-GetDeadline() time.Time
-Schedule() bool
Update(job Job)
}
type iterator[T Job] struct {
index int
-data []T
+data []*wrapper[T]
zero T
}
@@ -51,19 +51,18 @@ func (iter *iterator[T]) Next() (T, bool) {
return iter.zero, false
}
iter.index++
-return iter.data[iter.index], true
+return iter.data[iter.index].job, true
}
type array[T Job] struct {
size int
-data []T
-zero T
+data []*wrapper[T]
}
func newArray[T Job](size int) *array[T] {
return &array[T]{
size: size,
-data: make([]T, 0, size),
+data: make([]*wrapper[T], 0, size),
}
}
@@ -78,7 +77,7 @@ func (a *array[T]) Full() bool {
return len(a.data) >= a.size
}
-func (a *array[T]) Append(job T) bool {
+func (a *array[T]) Append(job *wrapper[T]) bool {
if len(a.data) >= a.size {
return false
}
@@ -86,20 +85,20 @@ func (a *array[T]) Append(job T) bool {
return true
}
-func (a *array[T]) Get(key JobKey) (T, bool) {
+func (a *array[T]) Get(key JobKey) (*wrapper[T], bool) {
for _, job := range a.data {
if key == job.GetKey() {
return job, true
}
}
-return a.zero, false
+return nil, false
}
func (a *array[T]) Remove(key JobKey) {
length := len(a.data)
for i, elm := range a.data {
if key == elm.GetKey() {
-a.data[i], a.data[length-1] = a.data[length-1], a.zero
+a.data[i], a.data[length-1] = a.data[length-1], nil
a.data = a.data[:length-1]
return
}
@@ -107,47 +106,76 @@ func (a *array[T]) Remove(key JobKey) {
}
type Runner[T Job] struct {
-maxRunning int
-waiting *linkedhashmap.Map
-running *array[T]
-runnable func(job T, iterateRunning func() (T, bool)) bool
-mutex sync.Mutex
-wg sync.WaitGroup
-context context.Context
-cancel context.CancelFunc
-zero T
-signal chan struct{}
-all map[string]T
-delayQueue *DelayQueue[T]
+maxRunning int
+waiting *linkedhashmap.Map
+running *array[T]
+runJob RunJobFunc[T]
+runnableJob RunnableJobFunc[T]
+scheduleJob ScheduleJobFunc[T]
+mutex sync.Mutex
+wg sync.WaitGroup
+context context.Context
+cancel context.CancelFunc
+zero T
+signal chan struct{}
+all map[JobKey]*wrapper[T]
+delayQueue *DelayQueue[*wrapper[T]]
}
-func NewRunner[T Job](maxRunning int) *Runner[T] {
+type Opt[T Job] func(runner *Runner[T])
+func WithRunnableJob[T Job](runnableJob RunnableJobFunc[T]) Opt[T] {
+return func(runner *Runner[T]) {
+runner.runnableJob = runnableJob
+}
+}
+func WithScheduleJob[T Job](scheduleJob ScheduleJobFunc[T]) Opt[T] {
+return func(runner *Runner[T]) {
+runner.scheduleJob = scheduleJob
+}
+}
+func NewRunner[T Job](maxRunning int, runJob RunJobFunc[T], opts ...Opt[T]) *Runner[T] {
ctx, cancel := context.WithCancel(context.Background())
runner := &Runner[T]{
maxRunning: maxRunning,
-all: make(map[string]T, maxRunning),
+all: make(map[string]*wrapper[T], maxRunning),
waiting: linkedhashmap.New(),
running: newArray[T](maxRunning),
context: ctx,
cancel: cancel,
signal: make(chan struct{}, 1),
-delayQueue: NewDelayQueue[T](0),
+delayQueue: NewDelayQueue[*wrapper[T]](0),
}
+runner.runJob = runJob
+for _, opt := range opts {
+opt(runner)
+}
+if runner.runnableJob == nil {
+runner.runnableJob = func(job T, _ NextJobFunc[T]) bool {
+return true
+}
+}
runner.wg.Add(maxRunning + 1)
for i := 0; i < maxRunning; i++ {
go runner.run()
}
go func() {
defer runner.wg.Done()
timex.SleepWithContext(runner.context, time.Second*10)
for runner.context.Err() == nil {
-job, ok := runner.delayQueue.Dequeue(ctx)
+wrap, ok := runner.delayQueue.Dequeue(ctx)
if !ok {
continue
}
runner.mutex.Lock()
-runner.waiting.Put(job.GetKey(), job)
-job.SetStatus(JobWaiting)
+if old, ok := runner.all[wrap.key]; !ok || wrap != old {
+runner.mutex.Unlock()
+continue
+}
+runner.waiting.Put(wrap.key, wrap)
+wrap.status = JobWaiting
runner.trigger()
runner.mutex.Unlock()
}
@@ -166,144 +194,155 @@ func (r *Runner[T]) run() {
for r.context.Err() == nil {
select {
case <-r.signal:
-job, ok := r.pickRunnable()
+wrap, ok := r.pickRunnableJob()
if !ok {
continue
}
-r.doRun(job)
-r.afterRun(job)
+r.doRun(wrap)
+r.afterRun(wrap)
case <-r.context.Done():
}
}
}
-func (r *Runner[T]) doRun(job T) {
+func (r *Runner[T]) doRun(wrap *wrapper[T]) {
defer func() {
if err := recover(); err != nil {
logx.Error(fmt.Sprintf("failed to run job: %v", err))
}
}()
-job.Run(r.context)
+r.runJob(r.context, wrap.job)
}
-func (r *Runner[T]) afterRun(job T) {
+func (r *Runner[T]) afterRun(wrap *wrapper[T]) {
r.mutex.Lock()
defer r.mutex.Unlock()
-key := job.GetKey()
-r.running.Remove(key)
+r.running.Remove(wrap.key)
+delete(r.all, wrap.key)
+wrap.status = JobUnknown
r.trigger()
-switch job.GetStatus() {
-case JobRunning:
-r.schedule(r.context, job)
-case JobRemoved:
-delete(r.all, key)
-default:
-panic(fmt.Sprintf("invalid job status %v occurred after run", job.GetStatus()))
+if wrap.removed {
+return
+}
+deadline, err := r.doScheduleJob(wrap.job, true)
+if err != nil {
+return
+}
+_ = r.schedule(r.context, deadline, wrap.job)
}
-func (r *Runner[T]) pickRunnable() (T, bool) {
+func (r *Runner[T]) doScheduleJob(job T, finished bool) (time.Time, error) {
+if r.scheduleJob == nil {
+if finished {
+return time.Time{}, ErrFinished
+}
+return time.Now(), nil
+}
+return r.scheduleJob(job)
+}
+func (r *Runner[T]) pickRunnableJob() (*wrapper[T], bool) {
r.mutex.Lock()
defer r.mutex.Unlock()
iter := r.running.Iterator()
-var runnable T
+var runnable *wrapper[T]
ok := r.waiting.Any(func(key interface{}, value interface{}) bool {
-job := value.(T)
+wrap := value.(*wrapper[T])
iter.Begin()
-if job.Runnable(func() (Job, bool) { return iter.Next() }) {
+if r.runnableJob(wrap.job, iter.Next) {
if r.running.Full() {
return false
}
r.waiting.Remove(key)
-r.running.Append(job)
-job.SetStatus(JobRunning)
+r.running.Append(wrap)
+wrap.status = JobRunning
if !r.running.Full() && !r.waiting.Empty() {
r.trigger()
}
-runnable = job
+runnable = wrap
return true
}
return false
})
if !ok {
-return r.zero, false
+return nil, false
}
return runnable, true
}
-func (r *Runner[T]) schedule(ctx context.Context, job T) {
-if !job.Schedule() {
-delete(r.all, job.GetKey())
-job.SetStatus(JobRemoved)
-return
+func (r *Runner[T]) schedule(ctx context.Context, deadline time.Time, job T) error {
+wrap := newWrapper(job)
+wrap.deadline = deadline
+if wrap.deadline.After(time.Now()) {
+r.delayQueue.Enqueue(ctx, wrap)
+wrap.status = JobDelaying
+} else {
+r.waiting.Put(wrap.key, wrap)
+wrap.status = JobWaiting
+r.trigger()
}
-r.delayQueue.Enqueue(ctx, job)
-job.SetStatus(JobDelay)
+r.all[wrap.key] = wrap
+return nil
}
//func (r *Runner[T]) Schedule(ctx context.Context, job T) {
// r.mutex.Lock()
// defer r.mutex.Unlock()
//
// switch job.GetStatus() {
// case JobUnknown:
// case JobDelay:
// r.delayQueue.Remove(ctx, job.GetKey())
// case JobWaiting:
// r.waiting.Remove(job)
// case JobRunning:
// mark as removed; delete it after the job finishes running
// return
// case JobRemoved:
// return
// }
// r.schedule(ctx, job)
//}
func (r *Runner[T]) Add(ctx context.Context, job T) error {
r.mutex.Lock()
defer r.mutex.Unlock()
if _, ok := r.all[job.GetKey()]; ok {
-return nil
+return ErrExist
}
-r.schedule(ctx, job)
-return nil
+deadline, err := r.doScheduleJob(job, false)
+if err != nil {
+return err
+}
+return r.schedule(ctx, deadline, job)
}
func (r *Runner[T]) UpdateOrAdd(ctx context.Context, job T) error {
r.mutex.Lock()
defer r.mutex.Unlock()
-if old, ok := r.all[job.GetKey()]; ok {
-old.Update(job)
-job = old
+wrap, ok := r.all[job.GetKey()]
+if ok {
+wrap.job.Update(job)
+switch wrap.status {
+case JobDelaying:
+r.delayQueue.Remove(ctx, wrap.key)
+delete(r.all, wrap.key)
+case JobWaiting:
+r.waiting.Remove(wrap.key)
+delete(r.all, wrap.key)
+case JobRunning:
+return nil
+default:
+}
}
-r.schedule(ctx, job)
-return nil
+deadline, err := r.doScheduleJob(job, false)
+if err != nil {
+return err
+}
+return r.schedule(ctx, deadline, wrap.job)
}
func (r *Runner[T]) StartNow(ctx context.Context, job T) error {
r.mutex.Lock()
defer r.mutex.Unlock()
-key := job.GetKey()
-if old, ok := r.all[key]; ok {
-job = old
-if job.GetStatus() == JobDelay {
-r.delayQueue.Remove(ctx, key)
-r.waiting.Put(key, job)
-r.trigger()
+if wrap, ok := r.all[job.GetKey()]; ok {
+switch wrap.status {
+case JobDelaying:
+r.delayQueue.Remove(ctx, wrap.key)
+delete(r.all, wrap.key)
+case JobWaiting, JobRunning:
+return nil
+default:
+}
-return nil
}
-r.all[key] = job
-r.waiting.Put(key, job)
-r.trigger()
-return nil
+return r.schedule(ctx, time.Now(), job)
}
func (r *Runner[T]) trigger() {
@@ -317,23 +356,21 @@ func (r *Runner[T]) Remove(ctx context.Context, key JobKey) error {
r.mutex.Lock()
defer r.mutex.Unlock()
-job, ok := r.all[key]
+wrap, ok := r.all[key]
if !ok {
-return nil
+return ErrNotFound
}
-switch job.GetStatus() {
-case JobUnknown:
-panic(fmt.Sprintf("invalid job status %v occurred after added", job.GetStatus()))
-case JobDelay:
+switch wrap.status {
+case JobDelaying:
r.delayQueue.Remove(ctx, key)
+delete(r.all, key)
case JobWaiting:
r.waiting.Remove(key)
+delete(r.all, key)
case JobRunning:
-// mark as removed; delete it after the job finishes running
-case JobRemoved:
-return nil
+// uniformly mark as removed; delete it after the job finishes running
+wrap.removed = true
+default:
}
-delete(r.all, key)
-job.SetStatus(JobRemoved)
return nil
}
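
Taken together, the refactor removes the job-side callbacks (`Run`, `Runnable`, `Schedule`, the status getter/setter) from the `Job` interface and supplies them to the `Runner` instead: the work function is a required constructor argument, while rescheduling and gating are optional functional options. A hedged usage sketch of the new API, reusing the made-up `backupJob` from the earlier sketch:

```go
package runner

import (
	"context"
	"time"
)

func exampleRunner() error {
	r := NewRunner[*backupJob](4,
		// runJob replaces the old Job.Run callback.
		func(ctx context.Context, job *backupJob) {
			// ... perform the backup ...
		},
		// WithScheduleJob replaces Job.Schedule: it yields the next deadline;
		// returning an error (e.g. ErrFinished) stops rescheduling.
		WithScheduleJob[*backupJob](func(job *backupJob) (time.Time, error) {
			return time.Now().Add(time.Hour), nil
		}),
		// WithRunnableJob replaces Job.Runnable: a gate that may inspect the
		// currently running jobs via next before a waiting job is started.
		WithRunnableJob[*backupJob](func(job *backupJob, next NextJobFunc[*backupJob]) bool {
			return true
		}),
	)
	defer r.Close()

	job := &backupJob{key: "backup-db-1"}
	if err := r.Add(context.Background(), job); err != nil {
		return err // ErrExist if the key is already scheduled
	}
	_ = r.StartNow(context.Background(), job) // skip the delay queue
	return r.Remove(context.Background(), job.GetKey())
}
```

Keeping the scheduling state on the internal wrapper also lets `UpdateOrAdd` and `Remove` replace or flag a job without racing against state stored on the job itself.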

View File

@@ -2,6 +2,7 @@ package runner
import (
"context"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"mayfly-go/pkg/utils/timex"
"sync"
@@ -11,72 +12,35 @@ import (
var _ Job = &testJob{}
-func newTestJob(key string, runTime time.Duration) *testJob {
+func newTestJob(key string) *testJob {
return &testJob{
-deadline: time.Now(),
-Key: key,
-run: func(ctx context.Context) {
-timex.SleepWithContext(ctx, runTime)
-},
+Key: key,
}
}
type testJob struct {
-run RunFunc
-Key JobKey
-status JobStatus
-ran bool
-deadline time.Time
+Key JobKey
+status int
}
-func (t *testJob) Update(_ Job) {
-}
-func (t *testJob) GetDeadline() time.Time {
-return t.deadline
-}
-func (t *testJob) Schedule() bool {
-return !t.ran
-}
-func (t *testJob) Run(ctx context.Context) {
-if t.run == nil {
-return
-}
-t.run(ctx)
-t.ran = true
-}
-func (t *testJob) Runnable(_ NextFunc) bool {
-return true
-}
+func (t *testJob) Update(_ Job) {}
func (t *testJob) GetKey() JobKey {
return t.Key
}
-func (t *testJob) GetStatus() JobStatus {
-return t.status
-}
-func (t *testJob) SetStatus(status JobStatus) {
-t.status = status
-}
func TestRunner_Close(t *testing.T) {
-runner := NewRunner[*testJob](1)
signal := make(chan struct{}, 1)
waiting := sync.WaitGroup{}
waiting.Add(1)
+runner := NewRunner[*testJob](1, func(ctx context.Context, job *testJob) {
+waiting.Done()
+timex.SleepWithContext(ctx, time.Hour)
+signal <- struct{}{}
+})
go func() {
job := &testJob{
Key: "close",
-run: func(ctx context.Context) {
-waiting.Done()
-timex.SleepWithContext(ctx, time.Hour)
-signal <- struct{}{}
-},
}
_ = runner.Add(context.Background(), job)
}()
@@ -95,55 +59,65 @@ func TestRunner_AddJob(t *testing.T) {
type testCase struct {
name string
job *testJob
-want bool
+want error
}
testCases := []testCase{
{
name: "first job",
-job: newTestJob("single", time.Hour),
-want: true,
+job: newTestJob("single"),
+want: nil,
},
{
name: "second job",
-job: newTestJob("dual", time.Hour),
-want: true,
-},
-{
-name: "non repetitive job",
-job: newTestJob("single", time.Hour),
-want: true,
+job: newTestJob("dual"),
+want: nil,
},
{
name: "repetitive job",
-job: newTestJob("dual", time.Hour),
-want: true,
+job: newTestJob("dual"),
+want: ErrExist,
},
}
-runner := NewRunner[*testJob](1)
+runner := NewRunner[*testJob](1, func(ctx context.Context, job *testJob) {
+timex.SleepWithContext(ctx, time.Hour)
+})
defer runner.Close()
for _, tc := range testCases {
err := runner.Add(context.Background(), tc.job)
+if tc.want != nil {
+require.ErrorIs(t, err, tc.want)
+continue
+}
+require.NoError(t, err)
}
}
func TestJob_UpdateStatus(t *testing.T) {
const d = time.Millisecond * 20
-runner := NewRunner[*testJob](1)
-running := newTestJob("running", d*2)
-waiting := newTestJob("waiting", d*2)
-_ = runner.Add(context.Background(), running)
-_ = runner.Add(context.Background(), waiting)
+const (
+unknown = iota
+running
+finished
+)
+runner := NewRunner[*testJob](1, func(ctx context.Context, job *testJob) {
+job.status = running
+timex.SleepWithContext(ctx, d*2)
+job.status = finished
+})
+first := newTestJob("first")
+second := newTestJob("second")
+_ = runner.Add(context.Background(), first)
+_ = runner.Add(context.Background(), second)
time.Sleep(d)
-require.Equal(t, JobRunning, running.status)
-require.Equal(t, JobWaiting, waiting.status)
+assert.Equal(t, running, first.status)
+assert.Equal(t, unknown, second.status)
time.Sleep(d * 2)
-require.Equal(t, JobRemoved, running.status)
-require.Equal(t, JobRunning, waiting.status)
+assert.Equal(t, finished, first.status)
+assert.Equal(t, running, second.status)
time.Sleep(d * 2)
-require.Equal(t, JobRemoved, running.status)
-require.Equal(t, JobRemoved, waiting.status)
+assert.Equal(t, finished, first.status)
+assert.Equal(t, finished, second.status)
}