fix: check whether a tunnel connection is still in use before removing it

Author: meilin.huang
Date: 2025-05-26 22:33:51 +08:00
parent d6280ea280
commit e0c01d4561
19 changed files with 314 additions and 161 deletions

View File

@@ -39,7 +39,7 @@ require (
gopkg.in/yaml.v3 v3.0.1
// gorm
gorm.io/driver/mysql v1.5.7
gorm.io/gorm v1.26.1
gorm.io/gorm v1.30.0
)
require (

View File

@@ -9,6 +9,7 @@ import (
_ "mayfly-go/internal/db/dbm/oracle"
_ "mayfly-go/internal/db/dbm/postgres"
_ "mayfly-go/internal/db/dbm/sqlite"
"mayfly-go/internal/machine/mcm"
"mayfly-go/pkg/logx"
"mayfly-go/pkg/pool"
)
@@ -17,6 +18,25 @@ var (
poolGroup = pool.NewPoolGroup[*dbi.DbConn]()
)
func init() {
mcm.AddCheckSshTunnelMachineUseFunc(func(machineId int) bool {
items := poolGroup.AllPool()
for _, v := range items {
if v.Stats().TotalConns == 0 {
continue // no connections in the pool, skip
}
conn, err := v.Get(context.Background())
if err != nil {
continue // failed to get a connection, skip
}
if conn.Info.SshTunnelMachineId == machineId {
return true
}
}
return false
})
}
// GetDbConn gets connection info from the connection pool
func GetDbConn(ctx context.Context, dbId uint64, database string, getDbInfo func() (*dbi.DbInfo, error)) (*dbi.DbConn, error) {
connId := dbi.GetDbConnId(dbId, database)

View File

@@ -8,6 +8,7 @@ import (
"mayfly-go/internal/es/domain/repository"
"mayfly-go/internal/es/esm/esi"
"mayfly-go/internal/es/imsg"
"mayfly-go/internal/machine/mcm"
"mayfly-go/internal/pkg/consts"
tagapp "mayfly-go/internal/tag/application"
tagdto "mayfly-go/internal/tag/application/dto"
@@ -40,6 +41,25 @@ var _ Instance = &instanceAppImpl{}
var poolGroup = pool.NewPoolGroup[*esi.EsConn]()
func init() {
mcm.AddCheckSshTunnelMachineUseFunc(func(machineId int) bool {
items := poolGroup.AllPool()
for _, v := range items {
if v.Stats().TotalConns == 0 {
continue // no connections in the pool, skip
}
conn, err := v.Get(context.Background())
if err != nil {
continue // failed to get a connection, skip
}
if conn.Info.SshTunnelMachineId == machineId {
return true
}
}
return false
})
}
type instanceAppImpl struct {
base.AppImpl[*entity.EsInstance, repository.EsInstance]
@@ -234,6 +254,8 @@ func (app *instanceAppImpl) Delete(ctx context.Context, instanceId uint64) error
return errorx.NewBiz("db instance not found")
}
poolGroup.Close(fmt.Sprintf("es-%d", instanceId))
return app.Tx(ctx, func(ctx context.Context) error {
// delete the instance
return app.DeleteById(ctx, instanceId)

View File

@@ -9,6 +9,27 @@ var (
poolGroup = pool.NewPoolGroup[*Cli]()
)
func init() {
AddCheckSshTunnelMachineUseFunc(func(machineId int) bool {
// iterate over all machine cli connections; if any of them uses this ssh tunnel machine, return true to indicate it is still in use...
items := poolGroup.AllPool()
for _, v := range items {
if v.Stats().TotalConns == 0 {
continue // no connections in the pool, skip
}
cli, err := v.Get(context.Background())
if err != nil {
continue // failed to get a connection, skip
}
sshTunnelMachine := cli.Info.SshTunnelMachine
if sshTunnelMachine != nil && sshTunnelMachine.Id == uint64(machineId) {
return true
}
}
return false
})
}
// Get the machine client from the cache; if it is not cached, the callback is used to fetch the machine info and create a new client.
// @param authCertName the machine's auth cert name
func GetMachineCli(ctx context.Context, authCertName string, getMachine func(string) (*MachineInfo, error)) (*Cli, error) {
@@ -39,7 +60,7 @@ func DeleteCli(id uint64) {
continue
}
if conn.Info.Id == id {
pool.Close()
poolGroup.Close(conn.Info.AuthCertName)
}
}
}

View File

@@ -10,10 +10,15 @@ import (
"mayfly-go/pkg/utils/netx"
"net"
"sync"
"time"
"golang.org/x/crypto/ssh"
)
// type SshTunnelAble interface {
// GetSshTunnelMachineId() int
// }
var (
// all registered functions that check whether an ssh tunnel machine is still in use
checkSshTunnelMachineHasUseFuncs []CheckSshTunnelMachineHasUseFunc
@@ -132,7 +137,20 @@ func GetSshTunnelMachine(ctx context.Context, machineId int, getMachine func(uin
logx.Infof("connect to the ssh tunnel machine for the first time[%d][%s:%d]", machineId, mi.Ip, mi.Port)
return stm, err
})
}, pool.WithIdleTimeout[*SshTunnelMachine](50*time.Minute), pool.WithOnConnClose(func(conn *SshTunnelMachine) error {
mid := int(conn.mi.Id)
logx.Debugf("periodically check if the ssh tunnel machine [%d] is still in use...", mid)
for _, checkUseFunc := range checkSshTunnelMachineHasUseFuncs {
// if any checker reports the machine as still in use, return an error so the connection is not closed and skip the remaining checks
if checkUseFunc(mid) {
return fmt.Errorf("ssh tunnel machine [%s] is still in use", conn.mi.Name)
}
}
return nil
}))
if err != nil {
return nil, err
}
@@ -142,18 +160,19 @@ func GetSshTunnelMachine(ctx context.Context, machineId int, getMachine func(uin
// Close the specified tunnel on the ssh tunnel machine
func CloseSshTunnelMachine(machineId uint64, tunnelId string) {
//sshTunnelMachine := mcIdPool[machineId]
//if sshTunnelMachine == nil {
// return
//}
//
//sshTunnelMachine.mutex.Lock()
//defer sshTunnelMachine.mutex.Unlock()
//t := sshTunnelMachine.tunnels[tunnelId]
//if t != nil {
// t.Close()
// delete(sshTunnelMachine.tunnels, tunnelId)
//}
sshTunnelMachinePool, ok := tunnelPoolGroup.Get(fmt.Sprintf("machine-tunnel-%d", machineId))
if !ok {
return
}
sshTunnelMachine, err := sshTunnelMachinePool.Get(context.Background())
if err != nil {
return
}
t := sshTunnelMachine.tunnels[tunnelId]
if t != nil {
t.Close()
delete(sshTunnelMachine.tunnels, tunnelId)
}
}
type Tunnel struct {

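The init() blocks in the db, es, mongo, redis and machine cli packages all register their checker through mcm.AddCheckSshTunnelMachineUseFunc. Only the checkSshTunnelMachineHasUseFuncs slice is visible in this diff, so the following is a hedged sketch of how the registration side is assumed to look; the append-based body is an inference, not something confirmed by the commit:

// CheckSshTunnelMachineHasUseFunc reports whether any live connection still
// uses the ssh tunnel machine with the given id.
type CheckSshTunnelMachineHasUseFunc func(machineId int) bool

// AddCheckSshTunnelMachineUseFunc registers a checker that the OnConnClose
// callback consults before an idle tunnel is discarded (assumed implementation).
func AddCheckSshTunnelMachineUseFunc(f CheckSshTunnelMachineHasUseFunc) {
	checkSshTunnelMachineHasUseFuncs = append(checkSshTunnelMachineHasUseFuncs, f)
}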
View File

@@ -2,6 +2,7 @@ package mgm
import (
"context"
"mayfly-go/internal/machine/mcm"
"mayfly-go/pkg/pool"
)
@@ -9,6 +10,25 @@ var (
poolGroup = pool.NewPoolGroup[*MongoConn]()
)
func init() {
mcm.AddCheckSshTunnelMachineUseFunc(func(machineId int) bool {
items := poolGroup.AllPool()
for _, v := range items {
if v.Stats().TotalConns == 0 {
continue // no connections in the pool, skip
}
conn, err := v.Get(context.Background())
if err != nil {
continue // failed to get a connection, skip
}
if conn.Info.SshTunnelMachineId == machineId {
return true
}
}
return false
})
}
// Get the mongo connection from the cache; if it is not cached, the callback is used to fetch the MongoInfo, connect and cache the connection
func GetMongoConn(ctx context.Context, mongoId uint64, getMongoInfo func() (*MongoInfo, error)) (*MongoConn, error) {
pool, err := poolGroup.GetCachePool(getConnId(mongoId), func() (*MongoConn, error) {

View File

@@ -2,16 +2,34 @@ package rdm
import (
"context"
"mayfly-go/internal/machine/mcm"
"mayfly-go/pkg/pool"
)
func init() {
}
var (
poolGroup = pool.NewPoolGroup[*RedisConn]()
)
func init() {
mcm.AddCheckSshTunnelMachineUseFunc(func(machineId int) bool {
// iterate over all redis connections; if any redis instance uses this ssh tunnel machine, return true to indicate it is still in use...
items := poolGroup.AllPool()
for _, v := range items {
if v.Stats().TotalConns == 0 {
continue // no connections in the pool, skip
}
rc, err := v.Get(context.Background())
if err != nil {
continue // failed to get a connection, skip
}
if rc.Info.SshTunnelMachineId == machineId {
return true
}
}
return false
})
}
// Get the redis connection from the cache; if it is not cached, the callback is used to fetch the RedisInfo, connect and cache the connection
func GetRedisConn(ctx context.Context, redisId uint64, db int, getRedisInfo func() (*RedisInfo, error)) (*RedisConn, error) {
p, err := poolGroup.GetCachePool(getConnId(redisId, db), func() (*RedisConn, error) {

View File

@@ -8,35 +8,27 @@ import (
"time"
)
var CachePoolDefaultConfig = PoolConfig{
MaxConns: 1,
IdleTimeout: 60 * time.Minute,
WaitTimeout: 10 * time.Second,
HealthCheckInterval: 10 * time.Minute,
}
type cacheEntry[T Conn] struct {
conn T
lastActive time.Time
}
func (e *cacheEntry[T]) Close() {
if err := e.conn.Close(); err != nil {
logx.Errorf("cache pool - closing connection error: %v", err)
}
}
type CachePool[T Conn] struct {
factory func() (T, error)
mu sync.RWMutex
cache map[string]*cacheEntry[T] // cache keyed by string
config PoolConfig
config PoolConfig[T]
closeCh chan struct{}
closed bool
}
func NewCachePool[T Conn](factory func() (T, error), opts ...Option) *CachePool[T] {
config := CachePoolDefaultConfig
func NewCachePool[T Conn](factory func() (T, error), opts ...Option[T]) *CachePool[T] {
config := PoolConfig[T]{
MaxConns: 1,
IdleTimeout: 60 * time.Minute,
WaitTimeout: 10 * time.Second,
HealthCheckInterval: 10 * time.Minute,
}
for _, opt := range opts {
opt(&config)
}
@@ -81,8 +73,9 @@ func (p *CachePool[T]) Get(ctx context.Context) (T, error) {
return entry.conn, nil
}
// clean up the timed-out connection
entry.Close()
delete(p.cache, key)
if !p.closeConn(key, entry, false) {
return entry.conn, nil
}
}
// 创建新连接
@@ -151,8 +144,9 @@ func (p *CachePool[T]) Close() {
p.closed = true
close(p.closeCh)
for _, entry := range p.cache {
entry.Close()
for key, entry := range p.cache {
// force-close the connection
p.closeConn(key, entry, true)
}
// trigger the close callback
@@ -212,13 +206,33 @@ func (p *CachePool[T]) cleanupIdle() {
cutoff := time.Now().Add(-p.config.IdleTimeout)
for key, entry := range p.cache {
if entry.lastActive.Before(cutoff) {
entry.Close()
delete(p.cache, key)
if entry.lastActive.Before(cutoff) || entry.conn.Ping() != nil {
logx.Infof("cache pool - cleaning up idle connection, key: %s", key)
// close the connection if it has timed out or is no longer reachable
p.closeConn(key, entry, false)
}
}
}
func (p *CachePool[T]) closeConn(key string, entry *cacheEntry[T], force bool) bool {
if !force {
// if this is not a forced close and a connection close callback is set, invoke the callback
// if the callback returns an error, the connection is not closed
if onConnClose := p.config.OnConnClose; onConnClose != nil {
if err := onConnClose(entry.conn); err != nil {
logx.Infof("cache pool - connection close callback returned error, skip closing connection:: %v", err)
return false
}
}
}
if err := entry.conn.Close(); err != nil {
logx.Errorf("cache pool - closing connection error: %v", err)
}
delete(p.cache, key)
return true
}
// generate a cache key
func generateCacheKey() string {
return stringx.RandUUID()

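A minimal, self-contained sketch of the new veto behaviour, assuming the pool.Conn interface only requires Close() error and Ping() error (consistent with how cache.go and the tests use connections) and that the health-check goroutine drives cleanupIdle, as the timeout tests rely on; toyConn is hypothetical:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"mayfly-go/pkg/pool"
)

// toyConn is a hypothetical connection type used only for this sketch.
type toyConn struct{ closed bool }

func (c *toyConn) Close() error { c.closed = true; return nil }
func (c *toyConn) Ping() error  { return nil }

func main() {
	inUse := true
	p := pool.NewCachePool(func() (*toyConn, error) { return &toyConn{}, nil },
		pool.WithIdleTimeout[*toyConn](10*time.Millisecond),
		pool.WithHealthCheckInterval[*toyConn](10*time.Millisecond),
		// veto: as long as inUse is true, idle cleanup must keep the connection
		pool.WithOnConnClose[*toyConn](func(c *toyConn) error {
			if inUse {
				return errors.New("still in use")
			}
			return nil
		}),
	)

	conn, _ := p.Get(context.Background())
	_ = p.Put(conn)

	time.Sleep(50 * time.Millisecond)                      // idle cleanup fires, the callback vetoes the close
	fmt.Println("closed after idle cleanup:", conn.closed) // expected: false

	p.Close()                                              // pool Close() force-closes regardless of the callback
	fmt.Println("closed after pool.Close():", conn.closed) // expected: true
}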
View File

@@ -10,13 +10,6 @@ import (
"time"
)
var ChanPoolDefaultConfig = PoolConfig{
MaxConns: 5,
IdleTimeout: 60 * time.Minute,
WaitTimeout: 10 * time.Second,
HealthCheckInterval: 10 * time.Minute,
}
// chanConn wraps a connection and its metadata
type chanConn[T Conn] struct {
conn T
@@ -41,7 +34,7 @@ type ChanPool[T Conn] struct {
mu sync.RWMutex
factory func() (T, error)
idleConns chan *chanConn[T]
config PoolConfig
config PoolConfig[T]
currentConns int32
stats PoolStats
closeChan chan struct{} // used to stop the health-check goroutine
@@ -56,9 +49,14 @@ type PoolStats struct {
WaitCount int64 // number of times callers waited for a connection
}
func NewChannelPool[T Conn](factory func() (T, error), opts ...Option) *ChanPool[T] {
func NewChannelPool[T Conn](factory func() (T, error), opts ...Option[T]) *ChanPool[T] {
// 1. initialize the config (defaults overridden by Options)
config := ChanPoolDefaultConfig
config := PoolConfig[T]{
MaxConns: 5,
IdleTimeout: 60 * time.Minute,
WaitTimeout: 10 * time.Second,
HealthCheckInterval: 10 * time.Minute,
}
for _, opt := range opts {
opt(&config)
}

View File

@@ -8,48 +8,57 @@ import (
var ErrPoolClosed = errors.New("pool is closed")
// PoolConfig is the connection pool configuration
type PoolConfig struct {
type PoolConfig[T Conn] struct {
MaxConns int // maximum number of connections
IdleTimeout time.Duration // idle connection timeout
WaitTimeout time.Duration // timeout for acquiring a connection from the pool
HealthCheckInterval time.Duration // health check interval
OnPoolClose func() error // callback invoked when the pool is closed
OnConnClose func(conn T) error // callback invoked before a connection is closed; if err != nil the connection is not closed
}
// Option is a function type used to configure a Pool
type Option func(*PoolConfig)
type Option[T Conn] func(*PoolConfig[T])
// WithMaxConns sets the maximum number of connections
func WithMaxConns(maxConns int) Option {
return func(c *PoolConfig) {
func WithMaxConns[T Conn](maxConns int) Option[T] {
return func(c *PoolConfig[T]) {
c.MaxConns = maxConns
}
}
// WithIdleTimeout sets the idle timeout
func WithIdleTimeout(timeout time.Duration) Option {
return func(c *PoolConfig) {
func WithIdleTimeout[T Conn](timeout time.Duration) Option[T] {
return func(c *PoolConfig[T]) {
c.IdleTimeout = timeout
}
}
// WithWaitTimeout sets the wait timeout
func WithWaitTimeout(timeout time.Duration) Option {
return func(c *PoolConfig) {
func WithWaitTimeout[T Conn](timeout time.Duration) Option[T] {
return func(c *PoolConfig[T]) {
c.WaitTimeout = timeout
}
}
// WithHealthCheckInterval sets the health check interval
func WithHealthCheckInterval(interval time.Duration) Option {
return func(c *PoolConfig) {
func WithHealthCheckInterval[T Conn](interval time.Duration) Option[T] {
return func(c *PoolConfig[T]) {
c.HealthCheckInterval = interval
}
}
// WithOnPoolClose sets the pool close callback
func WithOnPoolClose(fn func() error) Option {
return func(c *PoolConfig) {
func WithOnPoolClose[T Conn](fn func() error) Option[T] {
return func(c *PoolConfig[T]) {
c.OnPoolClose = fn
}
}
// WithOnConnClose sets the connection close callback; if it returns a non-nil error the connection is not closed
func WithOnConnClose[T Conn](fn func(conn T) error) Option[T] {
return func(c *PoolConfig[T]) {
c.OnConnClose = fn
}
}

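Because PoolConfig now carries the OnConnClose(conn T) callback, Option had to become the generic Option[T]. A side effect, visible in the updated tests, is that option helpers whose parameters say nothing about T (WithMaxConns, WithIdleTimeout, ...) need an explicit type argument at the call site, while the constructor still infers T from the factory. A small illustrative sketch with a hypothetical myConn type, assuming pool.Conn only requires Close() and Ping():

package main

import (
	"time"

	"mayfly-go/pkg/pool"
)

// myConn is hypothetical; it exists only to show the call-site change.
type myConn struct{}

func (c *myConn) Close() error { return nil }
func (c *myConn) Ping() error  { return nil }

func main() {
	p := pool.NewCachePool(
		func() (*myConn, error) { return &myConn{}, nil }, // T inferred as *myConn from the factory
		pool.WithMaxConns[*myConn](2),                     // explicit: the int argument carries no type information
		pool.WithIdleTimeout[*myConn](time.Minute),
	)
	defer p.Close()
}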
View File

@@ -23,7 +23,7 @@ func NewPoolGroup[T Conn]() *PoolGroup[T] {
func (pg *PoolGroup[T]) GetOrCreate(
key string,
poolFactory func() Pool[T],
opts ...Option,
opts ...Option[T],
) (Pool[T], error) {
// try to get it with a read lock first
pg.mu.RLock()
@@ -63,19 +63,29 @@ func (pg *PoolGroup[T]) GetOrCreate(
}
// GetChanPool gets or creates a ChannelPool-backed connection pool
func (pg *PoolGroup[T]) GetChanPool(key string, factory func() (T, error), opts ...Option) (Pool[T], error) {
func (pg *PoolGroup[T]) GetChanPool(key string, factory func() (T, error), opts ...Option[T]) (Pool[T], error) {
return pg.GetOrCreate(key, func() Pool[T] {
return NewChannelPool(factory, opts...)
}, opts...)
}
// GetCachePool gets or creates a CachePool-backed connection pool
func (pg *PoolGroup[T]) GetCachePool(key string, factory func() (T, error), opts ...Option) (Pool[T], error) {
func (pg *PoolGroup[T]) GetCachePool(key string, factory func() (T, error), opts ...Option[T]) (Pool[T], error) {
return pg.GetOrCreate(key, func() Pool[T] {
return NewCachePool(factory, opts...)
}, opts...)
}
// Get returns the connection pool for the given key
func (pg *PoolGroup[T]) Get(key string) (Pool[T], bool) {
pg.mu.RLock()
defer pg.mu.RUnlock()
if p, ok := pg.poolGroup[key]; ok {
return p, true
}
return nil, false
}
func (pg *PoolGroup[T]) Close(key string) error {
pg.mu.Lock()
defer pg.mu.Unlock()

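The new Get accessor is what CloseSshTunnelMachine uses above to look up an existing tunnel pool without creating one. A short usage sketch; the key string and demoConn type are illustrative only, assuming the same Close/Ping connection interface:

package main

import (
	"context"

	"mayfly-go/pkg/pool"
)

// demoConn is hypothetical and stands in for any type satisfying pool.Conn.
type demoConn struct{}

func (c *demoConn) Close() error { return nil }
func (c *demoConn) Ping() error  { return nil }

func main() {
	group := pool.NewPoolGroup[*demoConn]()

	// lazily create (or fetch) the pool for this key
	_, _ = group.GetCachePool("machine-tunnel-42", func() (*demoConn, error) { return &demoConn{}, nil })

	// later: look it up without creating anything, as CloseSshTunnelMachine does
	if p, ok := group.Get("machine-tunnel-42"); ok {
		if conn, err := p.Get(context.Background()); err == nil {
			_ = conn // operate on the cached connection
		}
	}
	_ = group.Close("machine-tunnel-42")
}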
View File

@@ -88,10 +88,10 @@ func newMockConn(id int) *mockConn {
func TestChanPool_Basic(t *testing.T) {
var idGen int
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
idGen++
return newMockConn(idGen), nil
}, WithMaxConns(2), WithIdleTimeout(time.Second))
}, WithMaxConns[*mockConn](2), WithIdleTimeout[*mockConn](time.Second))
ctx := context.Background()
conn1, _ := pool.Get(ctx)
@@ -112,9 +112,9 @@ func TestChanPool_Basic(t *testing.T) {
}
func TestChanPool_WaitTimeout(t *testing.T) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(1), WithWaitTimeout(100*time.Millisecond))
}, WithMaxConns[*mockConn](1), WithWaitTimeout[*mockConn](100*time.Millisecond))
ctx := context.Background()
conn1, _ := pool.Get(ctx)
@@ -132,9 +132,9 @@ func TestChanPool_WaitTimeout(t *testing.T) {
}
func TestChanPool_ContextCancel(t *testing.T) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(1))
}, WithMaxConns[*mockConn](1))
ctx, cancel := context.WithCancel(context.Background())
conn, _ := pool.Get(ctx)
@@ -145,9 +145,9 @@ func TestChanPool_ContextCancel(t *testing.T) {
}
func TestChanPool_Resize(t *testing.T) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(2))
}, WithMaxConns[*mockConn](2))
ctx := context.Background()
conn1, _ := pool.Get(ctx)
conn2, _ := pool.Get(ctx)
@@ -158,9 +158,9 @@ func TestChanPool_Resize(t *testing.T) {
}
func TestChanPool_HealthCheck(t *testing.T) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(1), WithIdleTimeout(10*time.Millisecond), WithHealthCheckInterval(10*time.Millisecond))
}, WithMaxConns[*mockConn](1), WithIdleTimeout[*mockConn](10*time.Millisecond), WithHealthCheckInterval[*mockConn](10*time.Millisecond))
ctx := context.Background()
conn, _ := pool.Get(ctx)
_ = pool.Put(conn)
@@ -176,10 +176,10 @@ func TestChanPool_HealthCheck(t *testing.T) {
func TestCachePool_Basic(t *testing.T) {
var idGen int
pool := NewCachePool(func() (Conn, error) {
pool := NewCachePool(func() (*mockConn, error) {
idGen++
return newMockConn(idGen), nil
}, WithMaxConns(2), WithIdleTimeout(time.Second))
}, WithMaxConns[*mockConn](2), WithIdleTimeout[*mockConn](time.Second))
ctx := context.Background()
conn1, _ := pool.Get(ctx)
@@ -193,9 +193,9 @@ func TestCachePool_Basic(t *testing.T) {
}
func TestCachePool_TimeoutCleanup(t *testing.T) {
pool := NewCachePool(func() (Conn, error) {
pool := NewCachePool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(1), WithIdleTimeout(10*time.Millisecond), WithHealthCheckInterval(10*time.Millisecond))
}, WithMaxConns[*mockConn](1), WithIdleTimeout[*mockConn](10*time.Millisecond), WithHealthCheckInterval[*mockConn](10*time.Millisecond))
ctx := context.Background()
conn, _ := pool.Get(ctx)
_ = pool.Put(conn)
@@ -209,10 +209,10 @@ func TestCachePool_TimeoutCleanup(t *testing.T) {
func TestCachePool_OverMaxConns(t *testing.T) {
var idGen int
pool := NewCachePool(func() (Conn, error) {
pool := NewCachePool(func() (*mockConn, error) {
idGen++
return newMockConn(idGen), nil
}, WithMaxConns(1))
}, WithMaxConns[*mockConn](1))
ctx := context.Background()
conn1, _ := pool.Get(ctx)
_ = pool.Put(conn1)
@@ -231,9 +231,9 @@ func TestCachePool_OverMaxConns(t *testing.T) {
}
func TestCachePool_Resize(t *testing.T) {
pool := NewCachePool(func() (Conn, error) {
pool := NewCachePool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(2))
}, WithMaxConns[*mockConn](2))
ctx := context.Background()
conn1, _ := pool.Get(ctx)
_ = pool.Put(conn1)
@@ -288,9 +288,9 @@ func TestPoolGroup_Concurrent(t *testing.T) {
// ========== stress tests ==========
func BenchmarkChanPool_Concurrent(b *testing.B) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(100))
}, WithMaxConns[*mockConn](100))
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@@ -307,9 +307,9 @@ func BenchmarkChanPool_Concurrent(b *testing.B) {
}
func BenchmarkCachePool_Concurrent(b *testing.B) {
pool := NewCachePool(func() (Conn, error) {
pool := NewCachePool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(100))
}, WithMaxConns[*mockConn](100))
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@@ -332,9 +332,9 @@ func TestChanPool_Stress(t *testing.T) {
iterations = 1000
)
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(20), WithWaitTimeout(time.Second))
}, WithMaxConns[*mockConn](20), WithWaitTimeout[*mockConn](time.Second))
var wg sync.WaitGroup
var errCount int32
@@ -389,9 +389,9 @@ func TestCachePool_Stress(t *testing.T) {
iterations = 1000
)
pool := NewCachePool(func() (Conn, error) {
pool := NewCachePool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(20), WithIdleTimeout(time.Minute))
}, WithMaxConns[*mockConn](20), WithIdleTimeout[*mockConn](time.Minute))
var wg sync.WaitGroup
var errCount int32
@@ -430,11 +430,11 @@ func TestCachePool_Stress(t *testing.T) {
// test pool behaviour when connections become invalid
func TestChanPool_InvalidConn(t *testing.T) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
conn := newMockConn(1)
conn.pingErr = errors.New("connection invalid")
return conn, nil
}, WithMaxConns(1), WithHealthCheckInterval(10*time.Millisecond))
}, WithMaxConns[*mockConn](1), WithHealthCheckInterval[*mockConn](10*time.Millisecond))
ctx := context.Background()
conn, _ := pool.Get(ctx)
@@ -458,9 +458,9 @@ func TestChanPool_InvalidConn(t *testing.T) {
// test pool behaviour under concurrent close
func TestChanPool_ConcurrentClose(t *testing.T) {
pool := NewChannelPool(func() (Conn, error) {
pool := NewChannelPool(func() (*mockConn, error) {
return newMockConn(1), nil
}, WithMaxConns(10))
}, WithMaxConns[*mockConn](10))
var wg sync.WaitGroup
const goroutines = 10