Optimize code

GoEdgeLab
2022-08-14 16:28:40 +08:00
parent e22473ce00
commit d1c84fb002
7 changed files with 84 additions and 14 deletions

View File

@@ -65,10 +65,11 @@ var sharedWritingFileKeyLocker = sync.Mutex{}
var maxOpenFiles = NewMaxOpenFiles()
const maxOpenFilesSlowCost = 5000 * time.Microsecond // 5ms
const maxOpenFilesSlowCost = 1000 * time.Microsecond // 1ms
const protectingLoadWhenDump = false
// FileStorage file-based cache
//
// file structure:
// [expires time] | [ status ] | [url length] | [header length] | [body length] | [url] [header data] [body data]
type FileStorage struct {
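The comment above describes a fixed-width prefix followed by the variable-length url, header, and body payloads. A minimal sketch of serializing such a prefix with encoding/binary, assuming (purely for illustration, not the actual FileStorage encoding) 8-byte expires/length fields and a 4-byte status:

package sketch

import "encoding/binary"

// encodeRecordHeader packs the fixed-width prefix of the layout
// [expires time] | [status] | [url length] | [header length] | [body length].
// All field widths here are assumptions; the real FileStorage format may differ.
func encodeRecordHeader(expiresAt int64, status uint32, urlLen, headerLen, bodyLen uint64) []byte {
	var buf = make([]byte, 8+4+8+8+8)
	binary.BigEndian.PutUint64(buf[0:8], uint64(expiresAt))
	binary.BigEndian.PutUint32(buf[8:12], status)
	binary.BigEndian.PutUint64(buf[12:20], urlLen)
	binary.BigEndian.PutUint64(buf[20:28], headerLen)
	binary.BigEndian.PutUint64(buf[28:36], bodyLen)
	return buf
}

The url, header data, and body data then follow this prefix in that order.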

View File

@@ -95,7 +95,8 @@ func (this *MemoryStorage) Init() error {
// start the scheduled flush-memory-to-disk task
if this.parentStorage != nil {
// TODO the number of threads should depend on disk performance
var threads = 1
// TODO the number of threads should be configurable in the cache policy and on the node
var threads = runtime.NumCPU()
for i := 0; i < threads; i++ {
goman.New(func() {
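The hunk above changes the flush loop from a single worker to one per CPU, each started through goman.New. A bare-bones sketch of the same fan-out using plain goroutines in place of the project's goman helper (flushLoop is a placeholder for the real flush-memory-to-disk work):

package sketch

import (
	"runtime"
	"sync"
)

// startFlushWorkers launches one flush worker per CPU, mirroring the change
// from a single goroutine to runtime.NumCPU() of them. A plain go statement
// and a WaitGroup stand in for the project's goman helper.
func startFlushWorkers(flushLoop func()) *sync.WaitGroup {
	var wg = &sync.WaitGroup{}
	var threads = runtime.NumCPU()
	for i := 0; i < threads; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			flushLoop()
		}()
	}
	return wg
}

As the new TODO notes, the worker count could later come from the cache policy or node settings rather than NumCPU().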

View File

@@ -8,6 +8,7 @@ import (
"runtime"
"runtime/debug"
"strconv"
"sync"
"testing"
"time"
)
@@ -304,3 +305,30 @@ func TestMemoryStorage_Stop(t *testing.T) {
t.Log(len(m))
}
func BenchmarkValuesMap(b *testing.B) {
	// measures mutex-guarded lookups and deletes on a map with one million MemoryItem entries
	var m = map[uint64]*MemoryItem{}
	var count = 1_000_000
	for i := 0; i < count; i++ {
		m[uint64(i)] = &MemoryItem{
			ExpiresAt: time.Now().Unix(),
		}
	}
	b.Log(len(m))
	var locker = sync.Mutex{}
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			locker.Lock()
			_, ok := m[uint64(rands.Int(0, 1_000_000))]
			_ = ok
			locker.Unlock()
			locker.Lock()
			delete(m, uint64(rands.Int(2, 1_000_000)))
			locker.Unlock()
		}
	})
}
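To run only this benchmark with allocation stats, the standard Go tooling invocation from the package directory is:

go test -run '^$' -bench BenchmarkValuesMap -benchmem

Since b.RunParallel spreads iterations across GOMAXPROCS goroutines, the result largely reflects contention on the single sync.Mutex guarding the map.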

View File

@@ -5,10 +5,46 @@ package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
stringutil "github.com/iwind/TeaGo/utils/string"
"strings"
"testing"
"time"
)
func TestBrotliWriter_LargeFile(t *testing.T) {
	// compresses ~32 MB of random data in 4 KB chunks and logs the elapsed time
	var data = []byte{}
	for i := 0; i < 1024*1024; i++ {
		data = append(data, stringutil.Rand(32)...)
	}
	t.Log(len(data)/1024/1024, "M")
	var before = time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()
	var buf = &bytes.Buffer{}
	writer, err := compressions.NewBrotliWriter(buf, 5)
	if err != nil {
		t.Fatal(err)
	}
	var offset = 0
	var size = 4096
	for offset < len(data) {
		_, err = writer.Write(data[offset : offset+size])
		if err != nil {
			t.Fatal(err)
		}
		offset += size
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
}
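The write loop in TestBrotliWriter_LargeFile only works because the 32 MB input is an exact multiple of the 4096-byte chunk size; otherwise data[offset : offset+size] would slice past the end and panic. A general-purpose sketch that also handles a trailing partial chunk (the helper is ours, not part of the compressions package):

package sketch

import "io"

// writeChunked writes data to w in fixed-size chunks and copes with a final
// chunk shorter than size.
func writeChunked(w io.Writer, data []byte, size int) error {
	for offset := 0; offset < len(data); offset += size {
		var end = offset + size
		if end > len(data) {
			end = len(data)
		}
		if _, err := w.Write(data[offset:end]); err != nil {
			return err
		}
	}
	return nil
}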
func BenchmarkBrotliWriter_Write(b *testing.B) {
var data = []byte(strings.Repeat("A", 1024))

View File

@@ -121,7 +121,7 @@ func (this *ProvinceManager) loop() error {
if err != nil {
return err
}
resp, err := rpcClient.RegionProvinceRPC().FindAllRegionProvincesWithCountryId(rpcClient.Context(), &pb.FindAllRegionProvincesWithCountryIdRequest{
resp, err := rpcClient.RegionProvinceRPC().FindAllRegionProvincesWithRegionCountryId(rpcClient.Context(), &pb.FindAllRegionProvincesWithRegionCountryIdRequest{
RegionCountryId: ChinaCountryId,
})
if err != nil {

View File

@@ -23,10 +23,11 @@ import (
"time"
)
const MaxQueueSize = 2048
const MaxQueueSize = 256 // TODO make this configurable, including per task
// Task a single metric task
// database storage:
//
// data/
// metric.$ID.db
// stats
@@ -372,7 +373,9 @@ func (this *Task) Upload(pauseDuration time.Duration) error {
for _, serverId := range serverIds {
for _, currentTime := range times {
idStrings, err := func(serverId int64, currentTime string) (ids []string, err error) {
var t = trackers.Begin("[METRIC]SELECT_TOP_STMT")
rows, err := this.selectTopStmt.Query(serverId, this.item.Version, currentTime)
t.End()
if err != nil {
return nil, err
}
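The added trackers.Begin("[METRIC]SELECT_TOP_STMT") / t.End() pair brackets only the query, so its cost is attributed to that label. A toy stand-in for the begin/end timing idiom (this is not the project's trackers package, only the shape of the pattern):

package sketch

import "time"

// Tracker times a labelled section of code: Begin starts the clock,
// End reports how long the section took.
type Tracker struct {
	label string
	start time.Time
}

func Begin(label string) *Tracker {
	return &Tracker{label: label, start: time.Now()}
}

func (t *Tracker) End() time.Duration {
	return time.Since(t.start)
}

Usage mirrors the hunk above: call Begin right before the query and End immediately after it returns.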

View File

@@ -161,6 +161,7 @@ func (this *HTTPClientPool) Client(req *HTTPRequest,
ExpectContinueTimeout: 1 * time.Second,
TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: tlsConfig,
ReadBufferSize: 8 * 1024,
Proxy: nil,
},
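ReadBufferSize sets the read buffer used for upstream connections; net/http falls back to 4 KB when it is zero, so this raises it to 8 KB. A self-contained sketch of a client with the same transport settings (only the fields shown in the hunk are taken from the source; the constructor, TLS config, and overall timeout are placeholders):

package sketch

import (
	"crypto/tls"
	"net/http"
	"time"
)

// newUpstreamClient builds an http.Client whose transport matches the
// settings in the hunk above.
func newUpstreamClient(tlsConfig *tls.Config) *http.Client {
	return &http.Client{
		Timeout: 30 * time.Second, // placeholder, not from the source
		Transport: &http.Transport{
			ExpectContinueTimeout: 1 * time.Second,
			TLSHandshakeTimeout:   5 * time.Second,
			TLSClientConfig:       tlsConfig,
			ReadBufferSize:        8 * 1024, // default is 4 KB when left at zero
			Proxy:                 nil,
		},
	}
}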
}