diff --git a/internal/db/models/api_token_dao.go b/internal/db/models/api_token_dao.go
index dc5fe41f..c4c64c9b 100644
--- a/internal/db/models/api_token_dao.go
+++ b/internal/db/models/api_token_dao.go
@@ -77,7 +77,7 @@ func (this *ApiTokenDAO) FindEnabledTokenWithNodeCacheable(tx *dbs.Tx, nodeId st
 		State(ApiTokenStateEnabled).
 		Find()
 	if one != nil {
-		token := one.(*ApiToken)
+		token = one.(*ApiToken)
 		SharedCacheLocker.Lock()
 		apiTokenCacheMap[nodeId] = token
 		SharedCacheLocker.Unlock()
diff --git a/internal/db/models/node_dao.go b/internal/db/models/node_dao.go
index b092ca77..4c6ed791 100644
--- a/internal/db/models/node_dao.go
+++ b/internal/db/models/node_dao.go
@@ -11,6 +11,7 @@ import (
 	"github.com/TeaOSLab/EdgeAPI/internal/utils"
 	"github.com/TeaOSLab/EdgeAPI/internal/utils/numberutils"
 	"github.com/TeaOSLab/EdgeAPI/internal/utils/sizes"
+	"github.com/TeaOSLab/EdgeAPI/internal/utils/ttlcache"
 	"github.com/TeaOSLab/EdgeCommon/pkg/configutils"
 	"github.com/TeaOSLab/EdgeCommon/pkg/nodeconfigs"
 	"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
@@ -35,8 +36,6 @@ const (
 	NodeStateDisabled = 0 // disabled
 )
 
-var nodeIdCacheMap = map[string]int64{} // uniqueId => nodeId
-
 type NodeDAO dbs.DAO
 
 func NewNodeDAO() *NodeDAO {
@@ -77,9 +76,7 @@ func (this *NodeDAO) DisableNode(tx *dbs.Tx, nodeId int64) (err error) {
 		return err
 	}
 	if len(uniqueId) > 0 {
-		SharedCacheLocker.Lock()
-		delete(nodeIdCacheMap, uniqueId)
-		SharedCacheLocker.Unlock()
+		ttlcache.SharedCache.Delete("nodeId@uniqueId@" + uniqueId)
 	}
 
 	_, err = this.Query(tx).
@@ -1279,36 +1276,39 @@ func (this *NodeDAO) UpdateNodeConnectedAPINodes(tx *dbs.Tx, nodeId int64, apiNo
 
 // FindEnabledNodeIdWithUniqueId finds an enabled node's ID by its uniqueId
 func (this *NodeDAO) FindEnabledNodeIdWithUniqueId(tx *dbs.Tx, uniqueId string) (int64, error) {
-	return this.Query(tx).
-		State(NodeStateEnabled).
-		Attr("uniqueId", uniqueId).
-		ResultPk().
-		FindInt64Col(0)
-}
-
-// FindEnabledNodeIdWithUniqueIdCacheable finds an enabled node's ID by its uniqueId, with caching
-func (this *NodeDAO) FindEnabledNodeIdWithUniqueIdCacheable(tx *dbs.Tx, uniqueId string) (int64, error) {
-	SharedCacheLocker.RLock()
-	nodeId, ok := nodeIdCacheMap[uniqueId]
-	if ok {
-		SharedCacheLocker.RUnlock()
-		return nodeId, nil
+	var cacheKey = "nodeId@uniqueId@" + uniqueId
+	var item = ttlcache.SharedCache.Read(cacheKey)
+	if item != nil {
+		return types.Int64(item.Value), nil
 	}
-	SharedCacheLocker.RUnlock()
-	nodeId, err := this.Query(tx).
+
+	one, err := this.Query(tx).
 		State(NodeStateEnabled).
 		Attr("uniqueId", uniqueId).
-		ResultPk().
-		FindInt64Col(0)
+		Result("id", "clusterId").
+		Find()
+	if err != nil || one == nil {
+		return 0, err
+	}
+
+	// check the cluster
+	var node = one.(*Node)
+	var clusterId = int64(node.ClusterId)
+	if clusterId <= 0 {
+		return 0, nil
+	}
+
+	isOn, err := SharedNodeClusterDAO.CheckNodeClusterIsOn(tx, clusterId)
 	if err != nil {
 		return 0, err
 	}
-	if nodeId > 0 {
-		SharedCacheLocker.Lock()
-		nodeIdCacheMap[uniqueId] = nodeId
-		SharedCacheLocker.Unlock()
+	if !isOn {
+		return 0, nil
 	}
-	return nodeId, nil
+
+	ttlcache.SharedCache.Write(cacheKey, int64(node.Id), time.Now().Unix()+60)
+
+	return int64(node.Id), nil
 }
 
 // CountAllEnabledNodesWithGrantId counts the nodes that use a given grant
diff --git a/internal/db/models/node_dao_test.go b/internal/db/models/node_dao_test.go
index 5455b663..36a710c9 100644
--- a/internal/db/models/node_dao_test.go
+++ b/internal/db/models/node_dao_test.go
@@ -77,3 +77,34 @@ func TestNodeDAO_ComposeNodeConfig_ParentNodes(t *testing.T) {
 	}
 	logs.PrintAsJSON(nodeConfig.ParentNodes, t)
 }
+
+func TestNodeDAO_FindEnabledNodeIdWithUniqueId(t *testing.T) {
+	dbs.NotifyReady()
+
+	var tx *dbs.Tx
+	// init
+	{
+		_, err := models.SharedNodeDAO.FindEnabledNodeIdWithUniqueId(tx, "a186380dbd26ccd49e75d178ec59df1b")
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	var before = time.Now()
+	nodeId, err := models.SharedNodeDAO.FindEnabledNodeIdWithUniqueId(tx, "a186380dbd26ccd49e75d178ec59df1b")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("cost:", time.Since(before).Seconds()*1000, "ms")
+	t.Log("nodeId:", nodeId)
+
+	{
+		before = time.Now()
+		nodeId, err := models.SharedNodeDAO.FindEnabledNodeIdWithUniqueId(tx, "a186380dbd26ccd49e75d178ec59df1b")
+		if err != nil {
+			t.Fatal(err)
+		}
+		t.Log("cost:", time.Since(before).Seconds()*1000, "ms")
+		t.Log("nodeId:", nodeId)
+	}
+}
diff --git a/internal/db/models/node_task_dao_test.go b/internal/db/models/node_task_dao_test.go
index 992ca11b..7848f2b1 100644
--- a/internal/db/models/node_task_dao_test.go
+++ b/internal/db/models/node_task_dao_test.go
@@ -11,7 +11,7 @@ func TestNodeTaskDAO_CreateNodeTask(t *testing.T) {
 	dbs.NotifyReady()
 
 	var tx *dbs.Tx
-	err := SharedNodeTaskDAO.CreateNodeTask(tx, nodeconfigs.NodeRoleNode, 1, 2, 0, NodeTaskTypeConfigChanged, 0)
+	err := SharedNodeTaskDAO.CreateNodeTask(tx, nodeconfigs.NodeRoleNode, 1, 2, 0, 0, NodeTaskTypeConfigChanged, 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -22,7 +22,7 @@ func TestNodeTaskDAO_CreateClusterTask(t *testing.T) {
 	dbs.NotifyReady()
 
 	var tx *dbs.Tx
-	err := SharedNodeTaskDAO.CreateClusterTask(tx, nodeconfigs.NodeRoleNode, 1, 0, NodeTaskTypeConfigChanged)
+	err := SharedNodeTaskDAO.CreateClusterTask(tx, nodeconfigs.NodeRoleNode, 1, 0, 0, NodeTaskTypeConfigChanged)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -33,7 +33,7 @@ func TestNodeTaskDAO_ExtractClusterTask(t *testing.T) {
 	dbs.NotifyReady()
 
 	var tx *dbs.Tx
-	err := SharedNodeTaskDAO.ExtractNodeClusterTask(tx, 1, 0, NodeTaskTypeConfigChanged)
+	err := SharedNodeTaskDAO.ExtractNodeClusterTask(tx, 1, 0, 0, NodeTaskTypeConfigChanged)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/internal/rpc/utils/utils_ext.go b/internal/rpc/utils/utils_ext.go
index 17b79212..aa8c0aa8 100644
--- a/internal/rpc/utils/utils_ext.go
+++ b/internal/rpc/utils/utils_ext.go
@@ -114,8 +114,7 @@ func ValidateRequest(ctx context.Context, userTypes ...UserType) (userType UserT
 	switch apiToken.Role {
 	case UserTypeNode:
-		// TODO need to check whether the cluster has been deleted
-		nodeIntId, err := models.SharedNodeDAO.FindEnabledNodeIdWithUniqueIdCacheable(nil, nodeId)
+		nodeIntId, err := models.SharedNodeDAO.FindEnabledNodeIdWithUniqueId(nil, nodeId)
 		if err != nil {
 			return UserTypeNode, 0, 0, errors.New("context: " + err.Error())
 		}
 	}
diff --git a/internal/utils/expires/id_key_map.go b/internal/utils/expires/id_key_map.go
new file mode 100644
index 00000000..2001bba9
--- /dev/null
+++ b/internal/utils/expires/id_key_map.go
@@ -0,0 +1,60 @@
+// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
+
+package expires
+
+type IdKeyMap struct {
+	idKeys map[int64]string // id => key
+	keyIds map[string]int64 // key => id
+}
+
+func NewIdKeyMap() *IdKeyMap {
+	return &IdKeyMap{
+		idKeys: map[int64]string{},
+		keyIds: map[string]int64{},
+	}
+}
+
+func (this *IdKeyMap) Add(id int64, key string) {
+	oldKey, ok := this.idKeys[id]
+	if ok {
+		delete(this.keyIds, oldKey)
+	}
+
+	oldId, ok := this.keyIds[key]
+	if ok {
+		delete(this.idKeys, oldId)
+	}
+
+	this.idKeys[id] = key
+	this.keyIds[key] = id
+}
+
+func (this *IdKeyMap) Key(id int64) (key string, ok bool) {
+	key, ok = this.idKeys[id]
+	return
+}
+
+func (this *IdKeyMap) Id(key string) (id int64, ok bool) {
+	id, ok = this.keyIds[key]
+	return
+}
+
+func (this *IdKeyMap) DeleteId(id int64) {
+	key, ok := this.idKeys[id]
+	if ok {
+		delete(this.keyIds, key)
+	}
+	delete(this.idKeys, id)
+}
+
+func (this *IdKeyMap) DeleteKey(key string) {
+	id, ok := this.keyIds[key]
+	if ok {
+		delete(this.idKeys, id)
+	}
+	delete(this.keyIds, key)
+}
+
+func (this *IdKeyMap) Len() int {
+	return len(this.idKeys)
+}
diff --git a/internal/utils/expires/id_key_map_test.go b/internal/utils/expires/id_key_map_test.go
new file mode 100644
index 00000000..4fa60602
--- /dev/null
+++ b/internal/utils/expires/id_key_map_test.go
@@ -0,0 +1,46 @@
+// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
+
+package expires
+
+import (
+	"github.com/iwind/TeaGo/assert"
+	"github.com/iwind/TeaGo/logs"
+	"testing"
+)
+
+func TestNewIdKeyMap(t *testing.T) {
+	var a = assert.NewAssertion(t)
+
+	var m = NewIdKeyMap()
+	m.Add(1, "1")
+	m.Add(1, "2")
+	m.Add(100, "100")
+	logs.PrintAsJSON(m.idKeys, t)
+	logs.PrintAsJSON(m.keyIds, t)
+
+	{
+		k, ok := m.Key(1)
+		a.IsTrue(ok)
+		a.IsTrue(k == "2")
+	}
+
+	{
+		_, ok := m.Key(2)
+		a.IsFalse(ok)
+	}
+
+	m.DeleteKey("2")
+
+	{
+		_, ok := m.Key(1)
+		a.IsFalse(ok)
+	}
+
+	logs.PrintAsJSON(m.idKeys, t)
+	logs.PrintAsJSON(m.keyIds, t)
+
+	m.DeleteId(100)
+
+	logs.PrintAsJSON(m.idKeys, t)
+	logs.PrintAsJSON(m.keyIds, t)
+}
diff --git a/internal/utils/expires/list.go b/internal/utils/expires/list.go
new file mode 100644
index 00000000..65ec0954
--- /dev/null
+++ b/internal/utils/expires/list.go
@@ -0,0 +1,159 @@
+package expires
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/zero"
+	"sync"
+)
+
+type ItemMap = map[uint64]zero.Zero
+
+type List struct {
+	expireMap map[int64]ItemMap // expires timestamp => map[id]ItemMap
+	itemsMap  map[uint64]int64  // itemId => timestamp
+
+	locker sync.Mutex
+
+	gcCallback      func(itemId uint64)
+	gcBatchCallback func(itemIds ItemMap)
+
+	lastTimestamp int64
+}
+
+func NewList() *List {
+	var list = &List{
+		expireMap: map[int64]ItemMap{},
+		itemsMap:  map[uint64]int64{},
+	}
+
+	SharedManager.Add(list)
+
+	return list
+}
+
+func NewSingletonList() *List {
+	var list = &List{
+		expireMap: map[int64]ItemMap{},
+		itemsMap:  map[uint64]int64{},
+	}
+
+	return list
+}
+
+// Add adds an item
+// If the item already exists, it is overwritten
+func (this *List) Add(itemId uint64, expiresAt int64) {
+	this.locker.Lock()
+	defer this.locker.Unlock()
+
+	if this.lastTimestamp == 0 || this.lastTimestamp > expiresAt {
+		this.lastTimestamp = expiresAt
+	}
+
+	// check whether the item already exists
+	oldExpiresAt, ok := this.itemsMap[itemId]
+	if ok {
+		if oldExpiresAt == expiresAt {
+			return
+		}
+		delete(this.expireMap, oldExpiresAt)
+	}
+
+	expireItemMap, ok := this.expireMap[expiresAt]
+	if ok {
+		expireItemMap[itemId] = zero.New()
+	} else {
+		expireItemMap = ItemMap{
+			itemId: zero.New(),
+		}
+		this.expireMap[expiresAt] = expireItemMap
+	}
+
+	this.itemsMap[itemId] = expiresAt
+}
+
+func (this *List) Remove(itemId uint64) {
+	this.locker.Lock()
+	defer this.locker.Unlock()
+	this.removeItem(itemId)
+}
+
+func (this *List) ExpiresAt(itemId uint64) int64 {
+	this.locker.Lock()
+	defer this.locker.Unlock()
+	return this.itemsMap[itemId]
+}
+
+func (this *List) GC(timestamp int64) ItemMap {
+	if this.lastTimestamp > timestamp+1 {
+		return nil
+	}
+	this.locker.Lock()
+	var itemMap = this.gcItems(timestamp)
+	if len(itemMap) == 0 {
+		this.locker.Unlock()
+		return itemMap
+	}
+	this.locker.Unlock()
+
+	if this.gcCallback != nil {
+		for itemId := range itemMap {
+			this.gcCallback(itemId)
+		}
+	}
+	if this.gcBatchCallback != nil {
+		this.gcBatchCallback(itemMap)
+	}
+
+	return itemMap
+}
+
+func (this *List) Clean() {
+	this.locker.Lock()
+	this.itemsMap = map[uint64]int64{}
+	this.expireMap = map[int64]ItemMap{}
+	this.locker.Unlock()
+}
+
+func (this *List) Count() int {
+	this.locker.Lock()
+	var count = len(this.itemsMap)
+	this.locker.Unlock()
+	return count
+}
+
+func (this *List) OnGC(callback func(itemId uint64)) *List {
+	this.gcCallback = callback
+	return this
+}
+
+func (this *List) OnGCBatch(callback func(itemMap ItemMap)) *List {
+	this.gcBatchCallback = callback
+	return this
+}
+
+func (this *List) removeItem(itemId uint64) {
+	expiresAt, ok := this.itemsMap[itemId]
+	if !ok {
+		return
+	}
+	delete(this.itemsMap, itemId)
+
+	expireItemMap, ok := this.expireMap[expiresAt]
+	if ok {
+		delete(expireItemMap, itemId)
+		if len(expireItemMap) == 0 {
+			delete(this.expireMap, expiresAt)
+		}
+	}
+}
+
+func (this *List) gcItems(timestamp int64) ItemMap {
+	expireItemsMap, ok := this.expireMap[timestamp]
+	if ok {
+		for itemId := range expireItemsMap {
+			delete(this.itemsMap, itemId)
+		}
+		delete(this.expireMap, timestamp)
+	}
+	return expireItemsMap
+}
diff --git a/internal/utils/expires/list_test.go b/internal/utils/expires/list_test.go
new file mode 100644
index 00000000..5a3dbd46
--- /dev/null
+++ b/internal/utils/expires/list_test.go
@@ -0,0 +1,216 @@
+package expires
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/utils"
+	"github.com/iwind/TeaGo/assert"
+	"github.com/iwind/TeaGo/logs"
+	timeutil "github.com/iwind/TeaGo/utils/time"
+	"math"
+	"runtime"
+	"testing"
+	"time"
+)
+
+func TestList_Add(t *testing.T) {
+	list := NewList()
+	list.Add(1, time.Now().Unix())
+	t.Log("===BEFORE===")
+	logs.PrintAsJSON(list.expireMap, t)
+	logs.PrintAsJSON(list.itemsMap, t)
+
+	list.Add(1, time.Now().Unix()+1)
+	list.Add(2, time.Now().Unix()+1)
+	list.Add(3, time.Now().Unix()+2)
+	t.Log("===AFTER===")
+	logs.PrintAsJSON(list.expireMap, t)
+	logs.PrintAsJSON(list.itemsMap, t)
+}
+
+func TestList_Add_Overwrite(t *testing.T) {
+	var timestamp = time.Now().Unix()
+
+	list := NewList()
+	list.Add(1, timestamp+1)
+	list.Add(1, timestamp+1)
+	list.Add(1, timestamp+2)
+	logs.PrintAsJSON(list.expireMap, t)
+	logs.PrintAsJSON(list.itemsMap, t)
+
+	var a = assert.NewAssertion(t)
+	a.IsTrue(len(list.itemsMap) == 1)
+	a.IsTrue(len(list.expireMap) == 1)
+	a.IsTrue(list.itemsMap[1] == timestamp+2)
+}
+
+func TestList_Remove(t *testing.T) {
+	list := NewList()
+	list.Add(1, time.Now().Unix()+1)
+	list.Remove(1)
+	logs.PrintAsJSON(list.expireMap, t)
+	logs.PrintAsJSON(list.itemsMap, t)
+}
+
+func TestList_GC(t *testing.T) {
+	var unixTime = time.Now().Unix()
+	t.Log("unixTime:", unixTime)
+
+	var list = NewList()
+	list.Add(1, unixTime+1)
+	list.Add(2, unixTime+1)
+	list.Add(3, unixTime+2)
+	list.OnGC(func(itemId uint64) {
+		t.Log("gc:", itemId)
+	})
+	t.Log("last unixTime:", list.lastTimestamp)
+	list.GC(time.Now().Unix() + 2)
+	logs.PrintAsJSON(list.expireMap, t)
+	logs.PrintAsJSON(list.itemsMap, t)
+
+	t.Log(list.Count())
+}
+
+func TestList_GC_Batch(t *testing.T) {
+	list := NewList()
+	list.Add(1, time.Now().Unix()+1)
+	list.Add(2, time.Now().Unix()+1)
+	list.Add(3, time.Now().Unix()+2)
+	list.Add(4, time.Now().Unix()+2)
+	list.OnGCBatch(func(itemMap ItemMap) {
+		t.Log("gc:", itemMap)
+	})
+	list.GC(time.Now().Unix() + 2)
+	logs.PrintAsJSON(list.expireMap, t)
+	logs.PrintAsJSON(list.itemsMap, t)
+}
+
+func TestList_Start_GC(t *testing.T) {
+	list := NewList()
+	list.Add(1, time.Now().Unix()+1)
+	list.Add(2, time.Now().Unix()+1)
+	list.Add(3, time.Now().Unix()+2)
+	list.Add(4, time.Now().Unix()+5)
+	list.Add(5, time.Now().Unix()+5)
+	list.Add(6, time.Now().Unix()+6)
+	list.Add(7, time.Now().Unix()+6)
+	list.Add(8, time.Now().Unix()+6)
+
+	list.OnGC(func(itemId uint64) {
+		t.Log("gc:", itemId, timeutil.Format("H:i:s"))
+		time.Sleep(2 * time.Second)
+	})
+
+	go func() {
+		SharedManager.Add(list)
+	}()
+
+	time.Sleep(20 * time.Second)
+}
+
+func TestList_ManyItems(t *testing.T) {
+	list := NewList()
+	for i := 0; i < 1_000; i++ {
+		list.Add(uint64(i), time.Now().Unix())
+	}
+	for i := 0; i < 1_000; i++ {
+		list.Add(uint64(i), time.Now().Unix()+1)
+	}
+
+	now := time.Now()
+	count := 0
+	list.OnGC(func(itemId uint64) {
+		count++
+	})
+	list.GC(time.Now().Unix() + 1)
+	t.Log("gc", count, "items")
+	t.Log(time.Now().Sub(now))
+}
+
+func TestList_Map_Performance(t *testing.T) {
+	t.Log("max uint32", math.MaxUint32)
+
+	var timestamp = time.Now().Unix()
+
+	{
+		m := map[int64]int64{}
+		for i := 0; i < 1_000_000; i++ {
+			m[int64(i)] = timestamp
+		}
+
+		now := time.Now()
+		for i := 0; i < 100_000; i++ {
+			delete(m, int64(i))
+		}
+		t.Log(time.Now().Sub(now))
+	}
+
+	{
+		m := map[uint64]int64{}
+		for i := 0; i < 1_000_000; i++ {
+			m[uint64(i)] = timestamp
+		}
+
+		now := time.Now()
+		for i := 0; i < 100_000; i++ {
+			delete(m, uint64(i))
+		}
+		t.Log(time.Now().Sub(now))
+	}
+
+	{
+		m := map[uint32]int64{}
+		for i := 0; i < 1_000_000; i++ {
+			m[uint32(i)] = timestamp
+		}
+
+		now := time.Now()
+		for i := 0; i < 100_000; i++ {
+			delete(m, uint32(i))
+		}
+		t.Log(time.Now().Sub(now))
+	}
+}
+
+func Benchmark_Map_Uint64(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+	var timestamp = uint64(time.Now().Unix())
+
+	var i uint64
+	var count uint64 = 1_000_000
+
+	m := map[uint64]uint64{}
+	for i = 0; i < count; i++ {
+		m[i] = timestamp
+	}
+
+	for n := 0; n < b.N; n++ {
+		for i = 0; i < count; i++ {
+			_ = m[i]
+		}
+	}
+}
+
+func BenchmarkList_GC(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+
+	var lists = []*List{}
+
+	for m := 0; m < 1_000; m++ {
+		var list = NewList()
+		for j := 0; j < 10_000; j++ {
+			list.Add(uint64(j), utils.UnixTime()+100)
+		}
+		lists = append(lists, list)
+	}
+
+	b.ResetTimer()
+
+	var timestamp = time.Now().Unix()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			for _, list := range lists {
+				list.GC(timestamp)
+			}
+		}
+	})
+}
diff --git a/internal/utils/expires/manager.go b/internal/utils/expires/manager.go
new file mode 100644
index 00000000..c9b33532
--- /dev/null
+++ b/internal/utils/expires/manager.go
@@ -0,0 +1,73 @@
+// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
+
+package expires
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/goman"
+	"github.com/TeaOSLab/EdgeAPI/internal/zero"
+	"sync"
+	"time"
+)
+
+var SharedManager = NewManager()
+
+type Manager struct {
+	listMap map[*List]zero.Zero
+	locker  sync.Mutex
+	ticker  *time.Ticker
+}
+
+func NewManager() *Manager {
+	var manager = &Manager{
+		listMap: map[*List]zero.Zero{},
+		ticker:  time.NewTicker(1 * time.Second),
+	}
+	goman.New(func() {
+		manager.init()
+	})
+	return manager
+}
+
+func (this *Manager) init() {
+	var lastTimestamp = int64(0)
+	for range this.ticker.C {
+		var currentTime = time.Now().Unix()
+		if lastTimestamp == 0 {
+			lastTimestamp = currentTime - 3600
+		}
+
+		if currentTime >= lastTimestamp {
+			for i := lastTimestamp; i <= currentTime; i++ {
+				this.locker.Lock()
+				for list := range this.listMap {
+					list.GC(i)
+				}
+				this.locker.Unlock()
+			}
+		} else {
+			// the clock moved backwards, so start over from the current second
+			for i := currentTime; i <= currentTime; i++ {
+				this.locker.Lock()
+				for list := range this.listMap {
+					list.GC(i)
+				}
+				this.locker.Unlock()
+			}
+		}
+
+		// this guards against sudden system clock jumps
+		lastTimestamp = currentTime
+	}
+}
+
+func (this *Manager) Add(list *List) {
+	this.locker.Lock()
+	this.listMap[list] = zero.New()
+	this.locker.Unlock()
+}
+
+func (this *Manager) Remove(list *List) {
+	this.locker.Lock()
+	delete(this.listMap, list)
+	this.locker.Unlock()
+}
diff --git a/internal/utils/testutils/memory.go b/internal/utils/testutils/memory.go
new file mode 100644
index 00000000..1a4a7381
--- /dev/null
+++ b/internal/utils/testutils/memory.go
@@ -0,0 +1,51 @@
+// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
+
+package testutils
+
+import (
+	"fmt"
+	timeutil "github.com/iwind/TeaGo/utils/time"
+	"runtime"
+	"testing"
+	"time"
+)
+
+func StartMemoryStatsGC(t *testing.T) {
+	var ticker = time.NewTicker(1 * time.Second)
+	go func() {
+		var stat = &runtime.MemStats{}
+		var lastHeapInUse uint64
+
+		for range ticker.C {
+			runtime.ReadMemStats(stat)
+			if stat.HeapInuse == lastHeapInUse {
+				return
+			}
+			lastHeapInUse = stat.HeapInuse
+
+			var before = time.Now()
+			runtime.GC()
+			var cost = time.Since(before).Seconds()
+
+			t.Log(timeutil.Format("H:i:s"), "HeapInuse:", fmt.Sprintf("%.2fM", float64(stat.HeapInuse)/1024/1024), "NumGC:", stat.NumGC, "Cost:", fmt.Sprintf("%.4f", cost*1000), "ms")
+		}
+	}()
+}
+
+func StartMemoryStats(t *testing.T) {
+	var ticker = time.NewTicker(1 * time.Second)
+	go func() {
+		var stat = &runtime.MemStats{}
+		var lastHeapInUse uint64
+
+		for range ticker.C {
+			runtime.ReadMemStats(stat)
+			if stat.HeapInuse == lastHeapInUse {
+				return
+			}
+			lastHeapInUse = stat.HeapInuse
+
+			t.Log(timeutil.Format("H:i:s"), "HeapInuse:", fmt.Sprintf("%.2fM", float64(stat.HeapInuse)/1024/1024), "NumGC:", stat.NumGC)
+		}
+	}()
+}
diff --git a/internal/utils/ttlcache/cache.go b/internal/utils/ttlcache/cache.go
new file mode 100644
index 00000000..8c747f2a
--- /dev/null
+++ b/internal/utils/ttlcache/cache.go
@@ -0,0 +1,161 @@
+package ttlcache
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/utils"
+	"time"
+)
+
+var SharedCache = NewCache()
+
+// Cache is a TTL cache
+// The maximum TTL is 30 * 86400 seconds
+// Piece data structure:
+// Piece1 | Piece2 | Piece3 | ...
+// [ Item1, Item2, ... ] | ...
+// KeyMap data structure:
+// { timestamp1 => [key1, key2, ...] }, ...
+type Cache struct {
+	isDestroyed bool
+	pieces      []*Piece
+	countPieces uint64
+	maxItems    int
+
+	gcPieceIndex int
+}
+
+func NewCache(opt ...OptionInterface) *Cache {
+	var countPieces = 256
+	var maxItems = 1_000_000
+
+	var totalMemory = utils.SystemMemoryGB()
+	if totalMemory < 2 {
+		// limit the number of items available to servers with very little memory
+		maxItems = 500_000
+	} else {
+		var delta = totalMemory / 8
+		if delta > 0 {
+			maxItems *= delta
+		}
+	}
+
+	for _, option := range opt {
+		if option == nil {
+			continue
+		}
+		switch o := option.(type) {
+		case *PiecesOption:
+			if o.Count > 0 {
+				countPieces = o.Count
+			}
+		case *MaxItemsOption:
+			if o.Count > 0 {
+				maxItems = o.Count
+			}
+		}
+	}
+
+	var cache = &Cache{
+		countPieces: uint64(countPieces),
+		maxItems:    maxItems,
+	}
+
+	for i := 0; i < countPieces; i++ {
+		cache.pieces = append(cache.pieces, NewPiece(maxItems/countPieces))
+	}
+
+	// Add to manager
+	SharedManager.Add(cache)
+
+	return cache
+}
+
+func (this *Cache) Write(key string, value interface{}, expiredAt int64) (ok bool) {
+	if this.isDestroyed {
+		return
+	}
+
+	var currentTimestamp = utils.UnixTime()
+	if expiredAt <= currentTimestamp {
+		return
+	}
+
+	var maxExpiredAt = currentTimestamp + 30*86400
+	if expiredAt > maxExpiredAt {
+		expiredAt = maxExpiredAt
+	}
+	uint64Key := HashKey([]byte(key))
+	pieceIndex := uint64Key % this.countPieces
+	return this.pieces[pieceIndex].Add(uint64Key, &Item{
+		Value:     value,
+		expiredAt: expiredAt,
+	})
+}
+
+func (this *Cache) IncreaseInt64(key string, delta int64, expiredAt int64, extend bool) int64 {
+	if this.isDestroyed {
+		return 0
+	}
+
+	currentTimestamp := time.Now().Unix()
+	if expiredAt <= currentTimestamp {
+		return 0
+	}
+
+	maxExpiredAt := currentTimestamp + 30*86400
+	if expiredAt > maxExpiredAt {
+		expiredAt = maxExpiredAt
+	}
+	uint64Key := HashKey([]byte(key))
+	pieceIndex := uint64Key % this.countPieces
+	return this.pieces[pieceIndex].IncreaseInt64(uint64Key, delta, expiredAt, extend)
+}
+
+func (this *Cache) Read(key string) (item *Item) {
+	uint64Key := HashKey([]byte(key))
+	return this.pieces[uint64Key%this.countPieces].Read(uint64Key)
+}
+
+func (this *Cache) readIntKey(key uint64) (value *Item) {
+	return this.pieces[key%this.countPieces].Read(key)
+}
+
+func (this *Cache) Delete(key string) {
+	uint64Key := HashKey([]byte(key))
+	this.pieces[uint64Key%this.countPieces].Delete(uint64Key)
+}
+
+func (this *Cache) deleteIntKey(key uint64) {
+	this.pieces[key%this.countPieces].Delete(key)
+}
+
+func (this *Cache) Count() (count int) {
+	for _, piece := range this.pieces {
+		count += piece.Count()
+	}
+	return
+}
+
+func (this *Cache) GC() {
+	this.pieces[this.gcPieceIndex].GC()
+	newIndex := this.gcPieceIndex + 1
+	if newIndex >= int(this.countPieces) {
+		newIndex = 0
+	}
+	this.gcPieceIndex = newIndex
+}
+
+func (this *Cache) Clean() {
+	for _, piece := range this.pieces {
+		piece.Clean()
+	}
+}
+
+func (this *Cache) Destroy() {
+	SharedManager.Remove(this)
+
+	this.isDestroyed = true
+
+	for _, piece := range this.pieces {
+		piece.Destroy()
+	}
+}
diff --git a/internal/utils/ttlcache/cache_test.go b/internal/utils/ttlcache/cache_test.go
new file mode 100644
index 00000000..67e505ab
--- /dev/null
+++ b/internal/utils/ttlcache/cache_test.go
@@ -0,0 +1,246 @@
+package ttlcache
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/utils"
+	"github.com/TeaOSLab/EdgeAPI/internal/utils/testutils"
+	"github.com/iwind/TeaGo/assert"
+	"github.com/iwind/TeaGo/rands"
+	"github.com/iwind/TeaGo/types"
+	"runtime"
+	"strconv"
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+func TestNewCache(t *testing.T) {
+	var cache = NewCache()
+	cache.Write("a", 1, time.Now().Unix()+3600)
+	cache.Write("b", 2, time.Now().Unix()+3601)
+	cache.Write("a", 1, time.Now().Unix()+3602)
+	cache.Write("d", 1, time.Now().Unix()+1)
+
+	for _, piece := range cache.pieces {
+		if len(piece.m) > 0 {
+			for k, item := range piece.m {
+				t.Log(k, "=>", item.Value, item.expiredAt)
+			}
+		}
+	}
+	t.Log("a:", cache.Read("a"))
+	time.Sleep(2 * time.Second)
+	t.Log("d:", cache.Read("d")) // should be nil
+	t.Log("left:", cache.Count(), "items")
+}
+
+func TestCache_Memory(t *testing.T) {
+	testutils.StartMemoryStats(t)
+
+	var cache = NewCache()
+	var count = 2_000_000
+	for i := 0; i < count; i++ {
+		cache.Write("a"+strconv.Itoa(i), 1, time.Now().Unix()+3600)
+	}
+
+	t.Log(cache.Count())
+
+	time.Sleep(10 * time.Second)
+	for i := 0; i < count; i++ {
+		if i%2 == 0 {
+			cache.Delete("a" + strconv.Itoa(i))
+		}
+	}
+
+	t.Log(cache.Count())
+
+	cache.Count()
+
+	time.Sleep(10 * time.Second)
+}
+
+func TestCache_IncreaseInt64(t *testing.T) {
+	var a = assert.NewAssertion(t)
+
+	var cache = NewCache()
+	var unixTime = time.Now().Unix()
+
+	{
+		cache.IncreaseInt64("a", 1, unixTime+3600, false)
+		var item = cache.Read("a")
+		t.Log(item)
+		a.IsTrue(item.Value == int64(1))
+		a.IsTrue(item.expiredAt == unixTime+3600)
+	}
+	{
+		cache.IncreaseInt64("a", 1, unixTime+3600+1, true)
+		var item = cache.Read("a")
+		t.Log(item)
+		a.IsTrue(item.Value == int64(2))
+		a.IsTrue(item.expiredAt == unixTime+3600+1)
+	}
+	{
+		cache.Write("b", 1, time.Now().Unix()+3600+2)
+		t.Log(cache.Read("b"))
+	}
+	{
+		cache.IncreaseInt64("b", 1, time.Now().Unix()+3600+3, false)
+		t.Log(cache.Read("b"))
+	}
+}
+
+func TestCache_Read(t *testing.T) {
+	runtime.GOMAXPROCS(1)
+
+	var cache = NewCache(PiecesOption{Count: 32})
+
+	for i := 0; i < 10_000_000; i++ {
+		cache.Write("HELLO_WORLD_"+strconv.Itoa(i), i, time.Now().Unix()+int64(i%10240)+1)
+	}
+	time.Sleep(10 * time.Second)
+
+	total := 0
+	for _, piece := range cache.pieces {
+		//t.Log(len(piece.m), "keys")
+		total += len(piece.m)
+	}
+	t.Log(total, "total keys")
+
+	before := time.Now()
+	for i := 0; i < 10_240; i++ {
+		_ = cache.Read("HELLO_WORLD_" + strconv.Itoa(i))
+	}
+	t.Log(time.Since(before).Seconds()*1000, "ms")
+}
+
+func TestCache_GC(t *testing.T) {
+	var cache = NewCache(&PiecesOption{Count: 5})
+	cache.Write("a", 1, time.Now().Unix()+1)
+	cache.Write("b", 2, time.Now().Unix()+2)
+	cache.Write("c", 3, time.Now().Unix()+3)
+	cache.Write("d", 4, time.Now().Unix()+4)
+	cache.Write("e", 5, time.Now().Unix()+10)
+
+	go func() {
+		for i := 0; i < 1000; i++ {
+			cache.Write("f", 1, time.Now().Unix()+1)
+			time.Sleep(10 * time.Millisecond)
+		}
+	}()
+
+	for i := 0; i < 20; i++ {
+		cache.GC()
+		t.Log("items:", cache.Count())
+
+		if cache.Count() == 0 {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+
+	t.Log("now:", time.Now().Unix())
+	for _, p := range cache.pieces {
+		t.Log("expire list:", p.expiresList.Count(), p.expiresList)
+		for k, v := range p.m {
+			t.Log(k, v.Value, v.expiredAt)
+		}
+	}
+}
+
+func TestCache_GC2(t *testing.T) {
+	runtime.GOMAXPROCS(1)
+
+	var cache1 = NewCache(NewPiecesOption(32))
+	for i := 0; i < 1_000_000; i++ {
+		cache1.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(0, 10)))
+	}
+
+	var cache2 = NewCache(NewPiecesOption(5))
+	for i := 0; i < 1_000_000; i++ {
+		cache2.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(0, 10)))
+	}
+
+	for i := 0; i < 100; i++ {
+		t.Log(cache1.Count(), "items", cache2.Count(), "items")
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func TestCacheDestroy(t *testing.T) {
+	var cache = NewCache()
+	t.Log("count:", SharedManager.Count())
+	cache.Destroy()
+	t.Log("count:", SharedManager.Count())
+}
+
+func BenchmarkNewCache(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+
+	var cache = NewCache(NewPiecesOption(128))
+	for i := 0; i < 2_000_000; i++ {
+		cache.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(10, 100)))
+	}
+	b.Log("start reading ...")
+
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			cache.Read(strconv.Itoa(rands.Int(0, 999999)))
+		}
+	})
+}
+
+func BenchmarkCache_Add(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+
+	var cache = NewCache()
+	for i := 0; i < b.N; i++ {
+		cache.Write(strconv.Itoa(i), i, utils.UnixTime()+int64(i%1024))
+	}
+}
+
+func BenchmarkCache_Add_Parallel(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+
+	var cache = NewCache()
+	var i int64
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			var j = atomic.AddInt64(&i, 1)
+			cache.Write(types.String(j), j, utils.UnixTime()+i%1024)
+		}
+	})
+}
+
+func BenchmarkNewCacheGC(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+
+	var cache = NewCache(NewPiecesOption(1024))
+	for i := 0; i < 3_000_000; i++ {
+		cache.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(0, 100)))
+	}
+	//b.Log(cache.pieces[0].Count())
+
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			cache.GC()
+		}
+	})
+}
+
+func BenchmarkNewCacheClean(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+
+	var cache = NewCache(NewPiecesOption(128))
+	for i := 0; i < 3_000_000; i++ {
+		cache.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(10, 100)))
+	}
+
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			cache.Clean()
+		}
+	})
+}
diff --git a/internal/utils/ttlcache/item.go b/internal/utils/ttlcache/item.go
new file mode 100644
index 00000000..9720e295
--- /dev/null
+++ b/internal/utils/ttlcache/item.go
@@ -0,0 +1,6 @@
+package ttlcache
+
+type Item struct {
+	Value     interface{}
+	expiredAt int64
+}
diff --git a/internal/utils/ttlcache/manager.go b/internal/utils/ttlcache/manager.go
new file mode 100644
index 00000000..4dfc0d62
--- /dev/null
+++ b/internal/utils/ttlcache/manager.go
@@ -0,0 +1,60 @@
+// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
+
+package ttlcache
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/goman"
+	"github.com/TeaOSLab/EdgeAPI/internal/zero"
+	"sync"
+	"time"
+)
+
+var SharedManager = NewManager()
+
+type Manager struct {
+	ticker *time.Ticker
+	locker sync.Mutex
+
+	cacheMap map[*Cache]zero.Zero
+}
+
+func NewManager() *Manager {
+	var manager = &Manager{
+		ticker:   time.NewTicker(2 * time.Second),
+		cacheMap: map[*Cache]zero.Zero{},
+	}
+
+	goman.New(func() {
+		manager.init()
+	})
+
+	return manager
+}
+
+func (this *Manager) init() {
+	for range this.ticker.C {
+		this.locker.Lock()
+		for cache := range this.cacheMap {
+			cache.GC()
+		}
+		this.locker.Unlock()
+	}
+}
+
+func (this *Manager) Add(cache *Cache) {
+	this.locker.Lock()
+	this.cacheMap[cache] = zero.New()
+	this.locker.Unlock()
+}
+
+func (this *Manager) Remove(cache *Cache) {
+	this.locker.Lock()
+	delete(this.cacheMap, cache)
+	this.locker.Unlock()
+}
+
+func (this *Manager) Count() int {
+	this.locker.Lock()
+	defer this.locker.Unlock()
+	return len(this.cacheMap)
+}
diff --git a/internal/utils/ttlcache/option.go b/internal/utils/ttlcache/option.go
new file mode 100644
index 00000000..09a4cf89
--- /dev/null
+++ b/internal/utils/ttlcache/option.go
@@ -0,0 +1,20 @@
+package ttlcache
+
+type OptionInterface interface {
+}
+
+type PiecesOption struct {
+	Count int
+}
+
+func NewPiecesOption(count int) *PiecesOption {
+	return &PiecesOption{Count: count}
+}
+
+type MaxItemsOption struct {
+	Count int
+}
+
+func NewMaxItemsOption(count int) *MaxItemsOption {
+	return &MaxItemsOption{Count: count}
+}
diff --git a/internal/utils/ttlcache/piece.go b/internal/utils/ttlcache/piece.go
new file mode 100644
index 00000000..d42ab141
--- /dev/null
+++ b/internal/utils/ttlcache/piece.go
@@ -0,0 +1,138 @@
+package ttlcache
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/utils"
+	"github.com/TeaOSLab/EdgeAPI/internal/utils/expires"
+	"github.com/iwind/TeaGo/types"
+	"sync"
+	"time"
+)
+
+type Piece struct {
+	m           map[uint64]*Item
+	expiresList *expires.List
+	maxItems    int
+	lastGCTime  int64
+
+	locker sync.RWMutex
+}
+
+func NewPiece(maxItems int) *Piece {
+	return &Piece{
+		m:           map[uint64]*Item{},
+		expiresList: expires.NewSingletonList(),
+		maxItems:    maxItems,
+	}
+}
+
+func (this *Piece) Add(key uint64, item *Item) (ok bool) {
+	this.locker.Lock()
+	if len(this.m) >= this.maxItems {
+		this.locker.Unlock()
+		return
+	}
+	this.m[key] = item
+	this.locker.Unlock()
+
+	this.expiresList.Add(key, item.expiredAt)
+
+	return true
+}
+
+func (this *Piece) IncreaseInt64(key uint64, delta int64, expiredAt int64, extend bool) (result int64) {
+	this.locker.Lock()
+	item, ok := this.m[key]
+	if ok && item.expiredAt > time.Now().Unix() {
+		result = types.Int64(item.Value) + delta
+		item.Value = result
+		if extend {
+			item.expiredAt = expiredAt
+		}
+		this.expiresList.Add(key, expiredAt)
+	} else {
+		if len(this.m) < this.maxItems {
+			result = delta
+			this.m[key] = &Item{
+				Value:     delta,
+				expiredAt: expiredAt,
+			}
+			this.expiresList.Add(key, expiredAt)
+		}
+	}
+	this.locker.Unlock()
+
+	return
+}
+
+func (this *Piece) Delete(key uint64) {
+	this.expiresList.Remove(key)
+
+	this.locker.Lock()
+	delete(this.m, key)
+	this.locker.Unlock()
+}
+
+func (this *Piece) Read(key uint64) (item *Item) {
+	this.locker.RLock()
+	item = this.m[key]
+	if item != nil && item.expiredAt < utils.UnixTime() {
+		item = nil
+	}
+	this.locker.RUnlock()
+
+	return
+}
+
+func (this *Piece) Count() (count int) {
+	this.locker.RLock()
+	count = len(this.m)
+	this.locker.RUnlock()
+	return
+}
+
+func (this *Piece) GC() {
+	var currentTime = time.Now().Unix()
+	if this.lastGCTime == 0 {
+		this.lastGCTime = currentTime - 3600
+	}
+
+	var min = this.lastGCTime
+	var max = currentTime
+	if min > max {
+		// the past time is greater than now, so start over from the current second
+		min = max
+	}
+
+	for i := min; i <= max; i++ {
+		var itemMap = this.expiresList.GC(i)
+		if len(itemMap) > 0 {
+			this.gcItemMap(itemMap)
+		}
+	}
+
+	this.lastGCTime = currentTime
+}
+
+func (this *Piece) Clean() {
+	this.locker.Lock()
+	this.m = map[uint64]*Item{}
+	this.locker.Unlock()
+
+	this.expiresList.Clean()
+}
+
+func (this *Piece) Destroy() {
+	this.locker.Lock()
+	this.m = nil
+	this.locker.Unlock()
+
+	this.expiresList.Clean()
+}
+
+func (this *Piece) gcItemMap(itemMap expires.ItemMap) {
+	this.locker.Lock()
+	for key := range itemMap {
+		delete(this.m, key)
+	}
+	this.locker.Unlock()
+}
diff --git a/internal/utils/ttlcache/piece_test.go b/internal/utils/ttlcache/piece_test.go
new file mode 100644
index 00000000..0ec0ff15
--- /dev/null
+++ b/internal/utils/ttlcache/piece_test.go
@@ -0,0 +1,60 @@
+package ttlcache
+
+import (
+	"github.com/iwind/TeaGo/rands"
+	"testing"
+	"time"
+)
+
+func TestPiece_Add(t *testing.T) {
+	piece := NewPiece(10)
+	piece.Add(1, &Item{expiredAt: time.Now().Unix() + 3600})
+	piece.Add(2, &Item{})
+	piece.Add(3, &Item{})
+	piece.Delete(3)
+	for key, item := range piece.m {
+		t.Log(key, item.Value)
+	}
+	t.Log(piece.Read(1))
+}
+
+func TestPiece_MaxItems(t *testing.T) {
+	piece := NewPiece(10)
+	for i := 0; i < 1000; i++ {
+		piece.Add(uint64(i), &Item{expiredAt: time.Now().Unix() + 3600})
+	}
+	t.Log(len(piece.m))
+}
+
+func TestPiece_GC(t *testing.T) {
+	piece := NewPiece(10)
+	piece.Add(1, &Item{Value: 1, expiredAt: time.Now().Unix() + 1})
+	piece.Add(2, &Item{Value: 2, expiredAt: time.Now().Unix() + 1})
+	piece.Add(3, &Item{Value: 3, expiredAt: time.Now().Unix() + 1})
+	t.Log("before gc ===")
+	for key, item := range piece.m {
+		t.Log(key, item.Value)
+	}
+
+	time.Sleep(1 * time.Second)
+	piece.GC()
+
+	t.Log("after gc ===")
+	for key, item := range piece.m {
+		t.Log(key, item.Value)
+	}
+}
+
+func TestPiece_GC2(t *testing.T) {
+	piece := NewPiece(10)
+	for i := 0; i < 10_000; i++ {
+		piece.Add(uint64(i), &Item{Value: 1, expiredAt: time.Now().Unix() + int64(rands.Int(1, 10))})
+	}
+
+	time.Sleep(1 * time.Second)
+
+	before := time.Now()
+	piece.GC()
+	t.Log(time.Since(before).Seconds()*1000, "ms")
+	t.Log(piece.Count())
+}
diff --git a/internal/utils/ttlcache/utils.go b/internal/utils/ttlcache/utils.go
new file mode 100644
index 00000000..59ff78bd
--- /dev/null
+++ b/internal/utils/ttlcache/utils.go
@@ -0,0 +1,7 @@
+package ttlcache
+
+import "github.com/cespare/xxhash"
+
+func HashKey(key []byte) uint64 {
+	return xxhash.Sum64(key)
+}
diff --git a/internal/utils/ttlcache/utils_test.go b/internal/utils/ttlcache/utils_test.go
new file mode 100644
index 00000000..5f9c88ff
--- /dev/null
+++ b/internal/utils/ttlcache/utils_test.go
@@ -0,0 +1,13 @@
+package ttlcache
+
+import (
+	"runtime"
+	"testing"
+)
+
+func BenchmarkHashKey(b *testing.B) {
+	runtime.GOMAXPROCS(1)
+	for i := 0; i < b.N; i++ {
+		HashKey([]byte("HELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLD"))
+	}
+}
diff --git a/internal/utils/unix_time.go b/internal/utils/unix_time.go
new file mode 100644
index 00000000..f5e76831
--- /dev/null
+++ b/internal/utils/unix_time.go
@@ -0,0 +1,63 @@
+package utils
+
+import (
+	"github.com/TeaOSLab/EdgeAPI/internal/goman"
+	"github.com/iwind/TeaGo/types"
+	"time"
+)
+
+var unixTime = time.Now().Unix()
+var unixTimeMilli = time.Now().UnixMilli()
+var unixTimeMilliString = types.String(unixTimeMilli)
+
+func init() {
+	var ticker = time.NewTicker(200 * time.Millisecond)
+	goman.New(func() {
+		for range ticker.C {
+			unixTime = time.Now().Unix()
+			unixTimeMilli = time.Now().UnixMilli()
+			unixTimeMilliString = types.String(unixTimeMilli)
+		}
+	})
+}
+
+// UnixTime is the fastest way to read a unix timestamp, for scenarios that do not need a very precise one
+func UnixTime() int64 {
+	return unixTime
+}
+
+// FloorUnixTime rounds the timestamp down to a multiple of seconds
+func FloorUnixTime(seconds int) int64 {
+	return UnixTime() / int64(seconds) * int64(seconds)
+}
+
+// CeilUnixTime rounds the timestamp down to a multiple of seconds, then adds one interval
+func CeilUnixTime(seconds int) int64 {
+	return UnixTime()/int64(seconds)*int64(seconds) + int64(seconds)
+}
+
+// NextMinuteUnixTime returns the timestamp at the start of the next minute
+func NextMinuteUnixTime() int64 {
+	return CeilUnixTime(60)
+}
+
+// UnixTimeMilli returns the timestamp in milliseconds
+func UnixTimeMilli() int64 {
+	return unixTimeMilli
+}
+
+func UnixTimeMilliString() (int64, string) {
+	return unixTimeMilli, unixTimeMilliString
+}
+
+// GMTUnixTime calculates the GMT timestamp
+func GMTUnixTime(timestamp int64) int64 {
+	_, offset := time.Now().Zone()
+	return timestamp - int64(offset)
+}
+
+// GMTTime calculates the GMT time
+func GMTTime(t time.Time) time.Time {
+	_, offset := time.Now().Zone()
+	return t.Add(-time.Duration(offset) * time.Second)
+}
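
Usage note (not part of the patch): the node_dao.go change above is a read-through cache built on the new ttlcache package — check ttlcache.SharedCache, fall back to the database on a miss, then write the value back with a 60-second TTL. The following is a minimal sketch of that pattern against the API introduced in this diff; loadNodeIdFromDB is a hypothetical stand-in for the actual SQL query.

package example

import (
	"time"

	"github.com/TeaOSLab/EdgeAPI/internal/utils/ttlcache"
	"github.com/iwind/TeaGo/types"
)

// loadNodeIdFromDB is a hypothetical stand-in for the real database lookup.
func loadNodeIdFromDB(uniqueId string) (int64, error) {
	return 0, nil
}

// findNodeId reads the node ID through the shared TTL cache,
// falling back to the database and caching the result for 60 seconds.
func findNodeId(uniqueId string) (int64, error) {
	var cacheKey = "nodeId@uniqueId@" + uniqueId

	// cache hit: Read returns nil for missing or expired items
	var item = ttlcache.SharedCache.Read(cacheKey)
	if item != nil {
		return types.Int64(item.Value), nil
	}

	// cache miss: load from the database
	nodeId, err := loadNodeIdFromDB(uniqueId)
	if err != nil || nodeId <= 0 {
		return 0, err
	}

	// write back with a 60-second expiration timestamp
	ttlcache.SharedCache.Write(cacheKey, nodeId, time.Now().Unix()+60)
	return nodeId, nil
}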