Integrate public as bindata optionally (#293)

* Dropped unused codekit config

* Integrated dynamic and static bindata for public

* Ignore public bindata

* Add a general generate make task

* Integrated flexible public assets into web command

* Updated vendoring, added all missing govendor deps

* Made the linter happy with the bindata and dynamic code

* Moved public bindata definition to modules directory

* Ignoring the new bindata path now

* Updated to the new public modules import path

* Updated public bindata command and drop the new prefix
This commit is contained in:
Thomas Boerger
2016-11-29 17:26:36 +01:00
committed by Lunny Xiao
parent 4680c349dd
commit b6a95a8cb3
691 changed files with 305318 additions and 1272 deletions

View File

@@ -0,0 +1,202 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package boltdb
import (
"os"
"path"
"github.com/boltdb/bolt"
"github.com/juju/errors"
"github.com/pingcap/tidb/store/localstore/engine"
"github.com/pingcap/tidb/util/bytes"
)
var (
	// Compile-time check that *db satisfies the engine.DB interface.
	_ engine.DB = (*db)(nil)
)

var (
	// bucketName is the single bolt bucket in which all key/value pairs
	// of this engine are stored.
	bucketName = []byte("tidb")
)

// db wraps a bolt.DB handle and implements engine.DB on top of it.
type db struct {
	*bolt.DB
}
// Get returns the value stored under key, or engine.ErrNotFound when the
// key is absent from the bucket.
func (d *db) Get(key []byte) ([]byte, error) {
	var result []byte
	viewErr := d.DB.View(func(tx *bolt.Tx) error {
		raw := tx.Bucket(bucketName).Get(key)
		if raw == nil {
			return errors.Trace(engine.ErrNotFound)
		}
		// Copy out: bolt memory is only valid inside the transaction.
		result = bytes.CloneBytes(raw)
		return nil
	})
	return result, errors.Trace(viewErr)
}
// MultiSeek performs one seek per input key on a single bolt cursor and
// returns a result for each key, in input order. A nil key seeks to the
// first entry in the bucket; a result carries engine.ErrNotFound when the
// cursor runs off the end of the bucket.
func (d *db) MultiSeek(keys [][]byte) []*engine.MSeekResult {
	res := make([]*engine.MSeekResult, 0, len(keys))
	d.DB.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucketName)
		c := b.Cursor()
		for _, key := range keys {
			var k, v []byte
			if key == nil {
				k, v = c.First()
			} else {
				k, v = c.Seek(key)
			}
			r := &engine.MSeekResult{}
			if k == nil {
				r.Err = engine.ErrNotFound
			} else {
				// Copy out: cursor memory is only valid inside the transaction.
				r.Key, r.Value, r.Err = bytes.CloneBytes(k), bytes.CloneBytes(v), nil
			}
			res = append(res, r)
		}
		return nil
	})
	return res
}
// Seek returns the first key/value pair whose key is >= startKey in byte
// order; a nil startKey yields the first pair in the bucket.
// engine.ErrNotFound is reported when no such key exists.
func (d *db) Seek(startKey []byte) ([]byte, []byte, error) {
	var foundKey, foundVal []byte
	viewErr := d.DB.View(func(tx *bolt.Tx) error {
		cur := tx.Bucket(bucketName).Cursor()
		var k, v []byte
		if startKey == nil {
			k, v = cur.First()
		} else {
			k, v = cur.Seek(startKey)
		}
		if k != nil {
			// Clone before the transaction ends; bolt reuses the memory.
			foundKey = bytes.CloneBytes(k)
			foundVal = bytes.CloneBytes(v)
		}
		return nil
	})
	switch {
	case viewErr != nil:
		return nil, nil, errors.Trace(viewErr)
	case foundKey == nil:
		return nil, nil, errors.Trace(engine.ErrNotFound)
	}
	return foundKey, foundVal, nil
}
// NewBatch creates an empty batch of pending writes.
func (d *db) NewBatch() engine.Batch {
	return &batch{}
}

// Commit applies all writes buffered in b atomically inside a single
// bolt read-write transaction. It rejects batches that were not created
// by this engine's NewBatch.
func (d *db) Commit(b engine.Batch) error {
	bt, ok := b.(*batch)
	if !ok {
		return errors.Errorf("invalid batch type %T", b)
	}
	err := d.DB.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucketName)
		// err1 is used for passing `go tool vet --shadow` check.
		var err1 error
		for _, w := range bt.writes {
			if !w.isDelete {
				err1 = b.Put(w.key, w.value)
			} else {
				err1 = b.Delete(w.key)
			}
			if err1 != nil {
				return errors.Trace(err1)
			}
		}
		return nil
	})
	return errors.Trace(err)
}

// Close closes the underlying bolt database file.
func (d *db) Close() error {
	return d.DB.Close()
}
// write is one buffered mutation: either a put of value under key, or a
// deletion of key when isDelete is set.
type write struct {
	key      []byte
	value    []byte
	isDelete bool
}

// batch accumulates writes in order until Commit applies them.
type batch struct {
	writes []write
}

// Put buffers a put of value under key. Both slices are copied so callers
// may reuse their buffers afterwards.
func (b *batch) Put(key []byte, value []byte) {
	keyCopy := append([]byte(nil), key...)
	valCopy := append([]byte(nil), value...)
	b.writes = append(b.writes, write{key: keyCopy, value: valCopy})
}

// Delete buffers a deletion of key. The key is copied.
func (b *batch) Delete(key []byte) {
	keyCopy := append([]byte(nil), key...)
	b.writes = append(b.writes, write{key: keyCopy, isDelete: true})
}

// Len returns the number of buffered operations.
func (b *batch) Len() int {
	return len(b.writes)
}
// Driver implements engine Driver.
type Driver struct {
}

// Open opens or creates a local storage database with given path.
// The parent directory is created when missing, and the "tidb" bucket is
// created up front so later reads and writes can assume it exists.
func (driver Driver) Open(dbPath string) (engine.DB, error) {
	base := path.Dir(dbPath)
	// Propagate directory-creation failures instead of silently ignoring
	// them and letting bolt.Open fail with a less specific error later.
	if err := os.MkdirAll(base, 0755); err != nil {
		return nil, err
	}
	d, err := bolt.Open(dbPath, 0600, nil)
	if err != nil {
		return nil, err
	}
	tx, err := d.Begin(true)
	if err != nil {
		// Close the handle on every failure path below so the file lock
		// taken by bolt.Open is not leaked.
		d.Close()
		return nil, err
	}
	if _, err = tx.CreateBucketIfNotExists(bucketName); err != nil {
		tx.Rollback()
		d.Close()
		return nil, err
	}
	if err = tx.Commit(); err != nil {
		d.Close()
		return nil, err
	}
	return &db{d}, nil
}

View File

@@ -0,0 +1,215 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package localstore
import (
"sync"
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/localstore/engine"
"github.com/pingcap/tidb/terror"
"github.com/pingcap/tidb/util/bytes"
)
const (
	// deleteWorkerCnt is the number of goroutines draining delCh.
	deleteWorkerCnt = 3
)

// compactPolicy defines gc policy of MVCC storage.
type compactPolicy struct {
	// SafePoint specifies the minimum age, in milliseconds, a version
	// must reach before it becomes eligible for garbage collection.
	SafePoint int
	// TriggerInterval specifies how often should the compactor
	// scans outdated data.
	TriggerInterval time.Duration
	// BatchDeleteCnt specifies the batch size for
	// deleting outdated data transaction.
	BatchDeleteCnt int
}

// localCompactDefaultPolicy is the default gc policy used when creating
// a compactor for a local store.
var localCompactDefaultPolicy = compactPolicy{
	SafePoint:       20 * 1000, // in ms
	TriggerInterval: 10 * time.Second,
	BatchDeleteCnt:  100,
}

// localstoreCompactor garbage-collects outdated MVCC versions of recently
// written keys using background worker goroutines.
type localstoreCompactor struct {
	mu              sync.Mutex
	recentKeys      map[string]struct{} // keys touched since the last sweep; guarded by mu
	stopCh          chan struct{}       // closed by Stop to terminate all workers
	delCh           chan kv.EncodedKey  // encoded versions queued for deletion
	workerWaitGroup *sync.WaitGroup
	ticker          *time.Ticker
	db              engine.DB
	policy          compactPolicy
}
// OnSet records k as recently written so the next compaction sweep will
// examine its old versions.
func (gc *localstoreCompactor) OnSet(k kv.Key) {
	gc.mu.Lock()
	gc.recentKeys[string(k)] = struct{}{}
	gc.mu.Unlock()
}

// OnDelete records k as recently deleted; compaction treats deletions the
// same way as writes.
func (gc *localstoreCompactor) OnDelete(k kv.Key) {
	gc.mu.Lock()
	gc.recentKeys[string(k)] = struct{}{}
	gc.mu.Unlock()
}
// getAllVersions collects every MVCC-encoded version of key currently in
// the engine, newest first (version keys are encoded descending).
func (gc *localstoreCompactor) getAllVersions(key kv.Key) ([]kv.EncodedKey, error) {
	var keys []kv.EncodedKey
	k := key
	for ver := kv.MaxVersion; ver.Ver > 0; ver.Ver-- {
		mvccK, _, err := gc.db.Seek(MvccEncodeVersionKey(key, ver))
		if terror.ErrorEqual(err, engine.ErrNotFound) {
			break
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		k, ver, err = MvccDecode(mvccK)
		// Check the decode error BEFORE using its results. The original
		// code compared k against key first, consulting a possibly bogus
		// key when the entry was malformed.
		if err != nil {
			return nil, errors.Trace(err)
		}
		if k.Cmp(key) != 0 {
			// Walked past the last version of key.
			break
		}
		keys = append(keys, bytes.CloneBytes(mvccK))
	}
	return keys, nil
}
// deleteWorker consumes keys from delCh and deletes them from the engine
// in batches of policy.BatchDeleteCnt, until stopCh is closed.
// NOTE(review): a partially filled batch is discarded when stopCh fires —
// confirm that dropping those pending deletions is acceptable.
func (gc *localstoreCompactor) deleteWorker() {
	defer gc.workerWaitGroup.Done()
	cnt := 0
	batch := gc.db.NewBatch()
	for {
		select {
		case <-gc.stopCh:
			return
		case key := <-gc.delCh:
			cnt++
			batch.Delete(key)
			// Batch delete.
			if cnt == gc.policy.BatchDeleteCnt {
				log.Debugf("[kv] GC delete commit %d keys", batch.Len())
				err := gc.db.Commit(batch)
				if err != nil {
					// Log and keep running; GC is best-effort.
					log.Error(err)
				}
				batch = gc.db.NewBatch()
				cnt = 0
			}
		}
	}
}
// checkExpiredKeysWorker periodically (on every gc.ticker tick) swaps out
// the recently-touched key set and compacts each key in it, until stopCh
// is closed.
func (gc *localstoreCompactor) checkExpiredKeysWorker() {
	defer gc.workerWaitGroup.Done()
	for {
		select {
		case <-gc.stopCh:
			log.Debug("[kv] GC stopped")
			return
		case <-gc.ticker.C:
			gc.mu.Lock()
			m := gc.recentKeys
			if len(m) == 0 {
				gc.mu.Unlock()
				continue
			}
			// Take ownership of the current set and start a fresh one so
			// Compact can run without holding the mutex.
			gc.recentKeys = make(map[string]struct{})
			gc.mu.Unlock()
			for k := range m {
				err := gc.Compact([]byte(k))
				if err != nil {
					log.Error(err)
				}
			}
		}
	}
}
// filterExpiredKeys returns the subset of keys older than the SafePoint
// and therefore deletable. keys must be ordered newest-to-oldest; the
// first (newest) expired version is skipped, i.e. retained, so reads at
// the safe point still find a version.
func (gc *localstoreCompactor) filterExpiredKeys(keys []kv.EncodedKey) []kv.EncodedKey {
	var ret []kv.EncodedKey
	first := true
	currentTS := time.Now().UnixNano() / int64(time.Millisecond)
	// keys are always in descending order.
	for _, k := range keys {
		_, ver, err := MvccDecode(k)
		if err != nil {
			// Should not happen.
			panic(err)
		}
		ts := localVersionToTimestamp(ver)
		// Check timeout keys.
		if currentTS-int64(ts) >= int64(gc.policy.SafePoint) {
			// Skip first version.
			if first {
				first = false
				continue
			}
			ret = append(ret, k)
		}
	}
	return ret
}
// Compact collects every stored version of k, filters out the ones past
// the safe point, and queues them for the delete workers.
func (gc *localstoreCompactor) Compact(k kv.Key) error {
	versions, err := gc.getAllVersions(k)
	if err != nil {
		return errors.Trace(err)
	}
	expired := gc.filterExpiredKeys(versions)

	if n := len(expired); n > 0 {
		log.Debugf("[kv] GC send %d keys to delete worker", n)
	}
	for _, victim := range expired {
		gc.delCh <- victim
	}
	return nil
}
// Start launches the delete workers and the expired-key scanner.
func (gc *localstoreCompactor) Start() {
	gc.workerWaitGroup.Add(deleteWorkerCnt)
	for w := 0; w < deleteWorkerCnt; w++ {
		go gc.deleteWorker()
	}

	gc.workerWaitGroup.Add(1)
	go gc.checkExpiredKeysWorker()
}

// Stop halts the ticker, signals every worker to exit via stopCh, and
// blocks until all of them have finished.
func (gc *localstoreCompactor) Stop() {
	gc.ticker.Stop()
	close(gc.stopCh)
	gc.workerWaitGroup.Wait()
}
// newLocalCompactor builds a compactor for db under the given policy;
// call Start to begin background collection.
func newLocalCompactor(policy compactPolicy, db engine.DB) *localstoreCompactor {
	c := new(localstoreCompactor)
	c.recentKeys = make(map[string]struct{})
	c.stopCh = make(chan struct{})
	c.delCh = make(chan kv.EncodedKey, 100)
	c.ticker = time.NewTicker(policy.TriggerInterval)
	c.policy = policy
	c.db = db
	c.workerWaitGroup = new(sync.WaitGroup)
	return c
}

View File

@@ -0,0 +1,60 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package engine
import "github.com/juju/errors"
// ErrNotFound indicates no key is found when trying Get or Seek an entry from DB.
var ErrNotFound = errors.New("local engine: key not found")

// Driver is the interface that must be implemented by a local storage db engine.
type Driver interface {
	// Open opens or creates a local storage DB.
	// The schema is a string for a local storage DB specific format.
	Open(schema string) (DB, error)
}

// MSeekResult is used to get multiple seek results.
type MSeekResult struct {
	Key   []byte
	Value []byte
	Err   error
}

// DB is the interface for local storage.
type DB interface {
	// Get gets the associated value with key, returns (nil, ErrNotFound) if no value found.
	Get(key []byte) ([]byte, error)
	// Seek searches for the first key in the engine which is >= key in byte order, returns (nil, nil, ErrNotFound)
	// if such key is not found.
	Seek(key []byte) ([]byte, []byte, error)
	// MultiSeek seeks multiple keys from the engine, returning one
	// MSeekResult per input key, in input order.
	MultiSeek(keys [][]byte) []*MSeekResult
	// NewBatch creates a Batch for writing.
	NewBatch() Batch
	// Commit writes the changed data in Batch atomically.
	Commit(b Batch) error
	// Close closes database.
	Close() error
}

// Batch is the interface for local storage.
type Batch interface {
	// Put appends 'put operation' of the key/value to the batch.
	Put(key []byte, value []byte)
	// Delete appends 'delete operation' of the key/value to the batch.
	Delete(key []byte)
	// Len returns the number of operations buffered in the batch.
	Len() int
}

View File

@@ -0,0 +1,118 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package goleveldb
import (
"sync"
"github.com/juju/errors"
"github.com/pingcap/tidb/store/localstore/engine"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
	// Compile-time interface checks.
	_ engine.DB    = (*db)(nil)
	_ engine.Batch = (*leveldb.Batch)(nil)
)

var (
	// p pools leveldb.Batch values so NewBatch/Commit avoid allocating a
	// fresh batch per write cycle.
	p = sync.Pool{
		New: func() interface{} {
			return &leveldb.Batch{}
		},
	}
)

// db wraps a goleveldb handle and implements engine.DB on top of it.
type db struct {
	*leveldb.DB
}
// Get returns the value for key, translating leveldb's not-found error
// into engine.ErrNotFound.
func (d *db) Get(key []byte) ([]byte, error) {
	value, err := d.DB.Get(key, nil)
	if err == leveldb.ErrNotFound {
		return nil, errors.Trace(engine.ErrNotFound)
	}
	return value, err
}

// NewBatch hands out a pooled leveldb batch; Commit returns it to the pool.
func (d *db) NewBatch() engine.Batch {
	return p.Get().(*leveldb.Batch)
}
// Seek searches for the first key in the db which is >= startKey in byte
// order, returning engine.ErrNotFound when no such key exists.
func (d *db) Seek(startKey []byte) ([]byte, []byte, error) {
	iter := d.DB.NewIterator(&util.Range{Start: startKey}, nil)
	defer iter.Release()
	if ok := iter.First(); !ok {
		return nil, nil, errors.Trace(engine.ErrNotFound)
	}
	// iter.Key()/iter.Value() return buffers owned by the iterator that
	// become invalid once it is released, so clone them before the
	// deferred Release runs (MultiSeek below clones for the same reason).
	key := append([]byte(nil), iter.Key()...)
	value := append([]byte(nil), iter.Value()...)
	return key, value, nil
}
// MultiSeek seeks every key on one shared iterator and reports one result
// per key, in input order.
func (d *db) MultiSeek(keys [][]byte) []*engine.MSeekResult {
	iter := d.DB.NewIterator(&util.Range{Start: []byte{0x0}}, nil)
	defer iter.Release()
	results := make([]*engine.MSeekResult, 0, len(keys))
	for _, target := range keys {
		if !iter.Seek(target) {
			results = append(results, &engine.MSeekResult{Err: engine.ErrNotFound})
			continue
		}
		// Clone: the iterator reuses its buffers on the next Seek.
		found := &engine.MSeekResult{
			Key:   append([]byte(nil), iter.Key()...),
			Value: append([]byte(nil), iter.Value()...),
		}
		results = append(results, found)
	}
	return results
}
// Commit writes batch b to the db and recycles it into the pool.
func (d *db) Commit(b engine.Batch) error {
	batch, ok := b.(*leveldb.Batch)
	if !ok {
		return errors.Errorf("invalid batch type %T", b)
	}
	err := d.DB.Write(batch, nil)
	// Reset before pooling so the next NewBatch hands out an empty batch.
	batch.Reset()
	p.Put(batch)
	return err
}

// Close closes the underlying goleveldb database.
func (d *db) Close() error {
	return d.DB.Close()
}
// Driver implements engine Driver.
type Driver struct {
}

// Open opens or creates a local storage database for the given path.
func (driver Driver) Open(path string) (engine.DB, error) {
	d, err := leveldb.OpenFile(path, &opt.Options{BlockCacheCapacity: 600 * 1024 * 1024})
	if err != nil {
		// Return a nil DB on failure instead of a wrapper around a nil
		// handle, so callers cannot accidentally use a broken store.
		return nil, err
	}
	return &db{d}, nil
}

// MemoryDriver implements engine Driver with an in-memory backend.
type MemoryDriver struct {
}

// Open opens a memory storage database; path is ignored.
func (driver MemoryDriver) Open(path string) (engine.DB, error) {
	d, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &db{d}, nil
}

466
vendor/github.com/pingcap/tidb/store/localstore/kv.go generated vendored Normal file
View File

@@ -0,0 +1,466 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package localstore
import (
"net/url"
"path/filepath"
"runtime/debug"
"sync"
"time"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/localstore/engine"
"github.com/pingcap/tidb/util/segmentmap"
"github.com/twinj/uuid"
)
var (
	// Compile-time check that *dbStore satisfies kv.Storage.
	_ kv.Storage = (*dbStore)(nil)
)

// op enumerates the commands understood by the store's scheduler loop.
type op int

const (
	opSeek = iota + 1
	opCommit
)

const (
	// maxSeekWorkers is the number of goroutines serving seek commands.
	maxSeekWorkers = 3

	lowerWaterMark = 10 // second
)

// command is a request passed to the scheduler goroutine.
type command struct {
	op    op
	txn   *dbTxn      // only set for opCommit
	args  interface{} // op-specific arguments (*seekArgs or *commitArgs)
	reply interface{} // op-specific reply, filled in before done is signalled
	done  chan error  // receives the command's final error exactly once
}

// seekReply carries the key/value found by a seek command.
type seekReply struct {
	key   []byte
	value []byte
}

// commitReply carries the result of a commit command.
type commitReply struct {
	err error
}

// seekArgs is the argument payload of an opSeek command.
type seekArgs struct {
	key []byte
}

// commitArgs is the (empty) argument payload of an opCommit command.
type commitArgs struct {
}
// Seek searches for the first key in the engine which is >= key in byte order, returns (nil, nil, ErrNotFound)
// if such key is not found. The request is funneled through the scheduler
// goroutine via commandCh and answered on the command's done channel.
func (s *dbStore) Seek(key []byte) ([]byte, []byte, error) {
	c := &command{
		op:   opSeek,
		args: &seekArgs{key: key},
		done: make(chan error, 1),
	}
	s.commandCh <- c
	err := <-c.done
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	reply := c.reply.(*seekReply)
	return reply.key, reply.value, nil
}
// CommitTxn sends txn to the scheduler goroutine for committing and waits
// for the result. Transactions with no locked keys (read-only) return
// immediately without touching the scheduler.
func (s *dbStore) CommitTxn(txn *dbTxn) error {
	if len(txn.lockedKeys) == 0 {
		return nil
	}
	c := &command{
		op:   opCommit,
		txn:  txn,
		args: &commitArgs{},
		done: make(chan error, 1),
	}
	s.commandCh <- c
	err := <-c.done
	return errors.Trace(err)
}
// seekWorker serves seek commands from seekCh until the channel is closed.
// After receiving one command it greedily drains whatever else is already
// queued (the labeled inner loop) so the whole batch can be answered with
// a single MultiSeek call against the engine.
func (s *dbStore) seekWorker(wg *sync.WaitGroup, seekCh chan *command) {
	defer wg.Done()
	for {
		var pending []*command
		select {
		case cmd, ok := <-seekCh:
			if !ok {
				// Channel closed by the scheduler on shutdown.
				return
			}
			pending = append(pending, cmd)
		L:
			for {
				select {
				case cmd, ok := <-seekCh:
					if !ok {
						break L
					}
					pending = append(pending, cmd)
				default:
					// Nothing else queued right now; stop draining.
					break L
				}
			}
		}
		s.doSeek(pending)
	}
}
// scheduler is the store's single dispatch goroutine: it fans seek
// commands out to a pool of seek workers, executes commits inline (so
// commits are serialized), and once a second prunes one rotating segment
// of recentUpdates. After closeCh fires it keeps draining commandCh,
// failing every subsequent command with ErrDBClosed.
func (s *dbStore) scheduler() {
	closed := false
	seekCh := make(chan *command, 1000)
	wgSeekWorkers := &sync.WaitGroup{}
	wgSeekWorkers.Add(maxSeekWorkers)
	for i := 0; i < maxSeekWorkers; i++ {
		go s.seekWorker(wgSeekWorkers, seekCh)
	}
	segmentIndex := int64(0)
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case cmd := <-s.commandCh:
			if closed {
				cmd.done <- ErrDBClosed
				continue
			}
			switch cmd.op {
			case opSeek:
				seekCh <- cmd
			case opCommit:
				s.doCommit(cmd)
			}
		case <-s.closeCh:
			closed = true
			// notify seek worker to exit
			close(seekCh)
			wgSeekWorkers.Wait()
			s.wg.Done()
		case <-tick.C:
			segmentIndex = segmentIndex % s.recentUpdates.SegmentCount()
			s.cleanRecentUpdates(segmentIndex)
			segmentIndex++
		}
	}
}
// cleanRecentUpdates drops entries from one segment of recentUpdates whose
// commit version is older than lowerWaterMark seconds, bounding the map's
// growth. The scheduler calls it with a rotating segment index.
func (s *dbStore) cleanRecentUpdates(segmentIndex int64) {
	m, err := s.recentUpdates.GetSegment(segmentIndex)
	if err != nil {
		log.Error(err)
		return
	}
	now := time.Now().Unix()
	for k, v := range m {
		dis := now - version2Second(v.(kv.Version))
		if dis > lowerWaterMark {
			delete(m, k)
		}
	}
}
// tryLock attempts to lock every key written by txn. It fails with
// kv.ErrLockConflict when a key is already locked by another transaction,
// or kv.ErrConditionNotMatch when a key was committed at a version newer
// than txn's start version (write conflict). On success all keys are
// registered in keysLocked under txn.tid.
func (s *dbStore) tryLock(txn *dbTxn) (err error) {
	// check conflict
	for k := range txn.lockedKeys {
		if _, ok := s.keysLocked[k]; ok {
			return errors.Trace(kv.ErrLockConflict)
		}
		lastVer, ok := s.recentUpdates.Get([]byte(k))
		if !ok {
			continue
		}
		// If there's newer version of this key, returns error.
		if lastVer.(kv.Version).Cmp(kv.Version{Ver: txn.tid}) > 0 {
			return errors.Trace(kv.ErrConditionNotMatch)
		}
	}

	// record
	for k := range txn.lockedKeys {
		s.keysLocked[k] = txn.tid
	}
	return nil
}
// doCommit executes a commit command on the scheduler goroutine: it locks
// the txn's keys, stamps the transaction with a fresh commit version,
// writes every buffered mutation as an MVCC-encoded entry (an empty value
// is stored as a tombstone), then releases the locks and reports the
// result on cmd.done.
func (s *dbStore) doCommit(cmd *command) {
	txn := cmd.txn
	curVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		log.Fatal(err)
	}
	err = s.tryLock(txn)
	if err != nil {
		cmd.done <- errors.Trace(err)
		return
	}
	// Update commit version.
	txn.version = curVer
	b := s.db.NewBatch()
	txn.us.WalkBuffer(func(k kv.Key, value []byte) error {
		mvccKey := MvccEncodeVersionKey(kv.Key(k), curVer)
		if len(value) == 0 { // Deleted marker
			b.Put(mvccKey, nil)
			s.compactor.OnDelete(k)
		} else {
			b.Put(mvccKey, value)
			s.compactor.OnSet(k)
		}
		return nil
	})
	err = s.writeBatch(b)
	s.unLockKeys(txn)
	cmd.done <- errors.Trace(err)
}
// doSeek answers a batch of pending seek commands with a single MultiSeek
// call against the engine, replying on each command's done channel.
func (s *dbStore) doSeek(seekCmds []*command) {
	keys := make([][]byte, 0, len(seekCmds))
	for _, cmd := range seekCmds {
		keys = append(keys, cmd.args.(*seekArgs).key)
	}
	results := s.db.MultiSeek(keys)
	for i, cmd := range seekCmds {
		reply := &seekReply{}
		var err error
		reply.key, reply.value, err = results[i].Key, results[i].Value, results[i].Err
		cmd.reply = reply
		cmd.done <- errors.Trace(err)
	}
}

// NewBatch exposes the engine's batch constructor.
func (s *dbStore) NewBatch() engine.Batch {
	return s.db.NewBatch()
}
// dbStore implements kv.Storage on top of a local engine.DB, adding MVCC
// encoding, a scheduler goroutine serializing seeks/commits, and a
// background compactor for garbage collection.
type dbStore struct {
	db engine.DB

	txns       map[uint64]*dbTxn
	keysLocked map[string]uint64 // key -> tid of the txn currently committing it
	// TODO: clean up recentUpdates
	recentUpdates *segmentmap.SegmentMap // key -> last commit version, for conflict checks
	uuid          string
	path          string
	compactor     *localstoreCompactor
	wg            *sync.WaitGroup

	commandCh chan *command
	closeCh   chan struct{}

	mu     sync.Mutex
	closed bool // guarded by mu
}

// storeCache deduplicates dbStore instances per engine schema path.
type storeCache struct {
	mu    sync.Mutex
	cache map[string]*dbStore
}

var (
	globalVersionProvider kv.VersionProvider
	mc                    storeCache

	// ErrDBClosed is the error meaning db is closed and we can't use it anymore.
	ErrDBClosed = errors.New("db is closed")
)

func init() {
	mc.cache = make(map[string]*dbStore)
	globalVersionProvider = &LocalVersionProvider{}
}
// Driver implements kv.Driver interface.
type Driver struct {
	// engine.Driver is the engine driver for different local db engine.
	engine.Driver
}

// IsLocalStore checks whether a storage is local or not.
func IsLocalStore(s kv.Storage) bool {
	_, ok := s.(*dbStore)
	return ok
}
// Open opens or creates a storage with specific format for a local engine Driver.
// The path should be a URL format which is described in tidb package.
// Stores are cached per engine schema: opening the same path again returns
// the already-running *dbStore instead of a second one.
func (d Driver) Open(path string) (kv.Storage, error) {
	mc.mu.Lock()
	defer mc.mu.Unlock()

	u, err := url.Parse(path)
	if err != nil {
		return nil, errors.Trace(err)
	}

	engineSchema := filepath.Join(u.Host, u.Path)
	if store, ok := mc.cache[engineSchema]; ok {
		// TODO: check the cache store has the same engine with this Driver.
		log.Info("[kv] cache store", engineSchema)
		return store, nil
	}

	db, err := d.Driver.Open(engineSchema)
	if err != nil {
		return nil, errors.Trace(err)
	}

	log.Info("[kv] New store", engineSchema)
	s := &dbStore{
		txns:       make(map[uint64]*dbTxn),
		keysLocked: make(map[string]uint64),
		uuid:       uuid.NewV4().String(),
		path:       engineSchema,
		db:         db,
		compactor:  newLocalCompactor(localCompactDefaultPolicy, db),
		commandCh:  make(chan *command, 1000),
		closed:     false,
		closeCh:    make(chan struct{}),
		wg:         &sync.WaitGroup{},
	}
	s.recentUpdates, err = segmentmap.NewSegmentMap(100)
	if err != nil {
		return nil, errors.Trace(err)
	}
	mc.cache[engineSchema] = s
	// Start background GC and the scheduler loop before handing the store out.
	s.compactor.Start()
	s.wg.Add(1)
	go s.scheduler()
	return s, nil
}
// UUID returns the unique identifier generated for this store instance.
func (s *dbStore) UUID() string {
	return s.uuid
}

// GetSnapshot returns a read-only snapshot at ver, capped at the version
// provider's current version.
func (s *dbStore) GetSnapshot(ver kv.Version) (kv.Snapshot, error) {
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		return nil, ErrDBClosed
	}
	s.mu.Unlock()
	currentVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if ver.Cmp(currentVer) > 0 {
		ver = currentVer
	}
	return &dbSnapshot{
		store:   s,
		version: ver,
	}, nil
}

// CurrentVersion returns the version provider's latest version.
func (s *dbStore) CurrentVersion() (kv.Version, error) {
	return globalVersionProvider.CurrentVersion()
}
// Begin transaction. The returned transaction reads from a snapshot taken
// at the version provider's current version.
func (s *dbStore) Begin() (kv.Transaction, error) {
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		return nil, ErrDBClosed
	}
	s.mu.Unlock()
	beginVer, err := globalVersionProvider.CurrentVersion()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return newTxn(s, beginVer), nil
}

// Close marks the store closed, stops the compactor, signals the scheduler
// and waits for it, removes the store from the global cache, and finally
// closes the engine.
func (s *dbStore) Close() error {
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		return ErrDBClosed
	}

	s.closed = true
	s.mu.Unlock()

	mc.mu.Lock()
	defer mc.mu.Unlock()
	s.compactor.Stop()
	s.closeCh <- struct{}{}
	s.wg.Wait()
	delete(mc.cache, s.path)
	return s.db.Close()
}
// writeBatch commits b to the engine; empty batches are a no-op.
func (s *dbStore) writeBatch(b engine.Batch) error {
	if b.Len() == 0 {
		return nil
	}

	// NOTE(review): s.closed is read here without holding s.mu — confirm
	// callers (the scheduler goroutine) serialize this with Close.
	if s.closed {
		return errors.Trace(ErrDBClosed)
	}

	err := s.db.Commit(b)
	if err != nil {
		log.Error(err)
		return errors.Trace(err)
	}

	return nil
}

// newBatch exposes the engine's batch constructor (unexported variant of
// NewBatch).
func (s *dbStore) newBatch() engine.Batch {
	return s.db.NewBatch()
}
// unLockKeys releases every key locked by txn and records its commit
// version in recentUpdates so later transactions can detect write
// conflicts. A key not locked by txn indicates a bookkeeping bug and
// aborts the process.
func (s *dbStore) unLockKeys(txn *dbTxn) error {
	for k := range txn.lockedKeys {
		if tid, ok := s.keysLocked[k]; !ok || tid != txn.tid {
			debug.PrintStack()
			// Fixed typo in the fatal message ("happend" -> "happen").
			log.Fatalf("should never happen:%v, %v", tid, txn.tid)
		}

		delete(s.keysLocked, k)
		s.recentUpdates.Set([]byte(k), txn.version, true)
	}

	return nil
}

View File

@@ -0,0 +1,67 @@
package localstore
import (
"errors"
"sync"
"time"
"github.com/ngaut/log"
"github.com/pingcap/tidb/kv"
)
// ErrOverflow is the error returned by CurrentVersion, it describes if
// there're too many versions allocations in a very short period of time, ID
// may conflict.
var ErrOverflow = errors.New("overflow when allocating new version")

// LocalVersionProvider uses local timestamp for version.
type LocalVersionProvider struct {
	mu sync.Mutex
	// lastTimestamp is the last physical timestamp handed out (already
	// shifted by timePrecisionOffset).
	lastTimestamp uint64
	// logical guaranteed version's monotonic increasing for calls when lastTimestamp
	// are equal.
	logical uint64
}
const (
timePrecisionOffset = 18
)
func time2TsPhysical(t time.Time) uint64 {
return uint64((t.UnixNano() / int64(time.Millisecond)) << timePrecisionOffset)
}
// version2Second converts a version back to wall-clock seconds by
// stripping the logical bits and dividing the milliseconds down.
func version2Second(v kv.Version) int64 {
	millis := int64(v.Ver >> timePrecisionOffset)
	return millis / 1000
}
// CurrentVersion implements the VersionProvider's GetCurrentVer interface.
// A version is the physical millisecond timestamp (shifted left by
// timePrecisionOffset) plus a logical counter that disambiguates calls
// landing in the same millisecond; ErrOverflow is returned when that
// counter is exhausted.
func (l *LocalVersionProvider) CurrentVersion() (kv.Version, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	for {
		ts := time2TsPhysical(time.Now())
		if l.lastTimestamp > ts {
			// Wall clock went backwards; retry until it catches up.
			log.Error("[kv] invalid physical time stamp")
			continue
		}

		if ts == l.lastTimestamp {
			l.logical++
			if l.logical >= 1<<timePrecisionOffset {
				return kv.Version{}, ErrOverflow
			}
			return kv.Version{Ver: ts + l.logical}, nil
		}

		l.lastTimestamp = ts
		l.logical = 0
		return kv.Version{Ver: ts}, nil
	}
}
// localVersionToTimestamp strips the logical bits from ver, yielding the
// physical millisecond timestamp it was created at.
func localVersionToTimestamp(ver kv.Version) uint64 {
	ts := ver.Ver >> timePrecisionOffset
	return ts
}

View File

@@ -0,0 +1,59 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package localstore
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/util/codec"
)
// ErrInvalidEncodedKey describes parsing an invalid format of EncodedKey.
var ErrInvalidEncodedKey = errors.New("invalid encoded key")

// isTombstone reports whether v is a deletion marker: tombstones are
// stored as zero-length values.
func isTombstone(v []byte) bool {
	return len(v) == 0
}
// MvccEncodeVersionKey returns the encoded key: the bytes-encoded raw key
// followed by the version encoded in descending order.
func MvccEncodeVersionKey(key kv.Key, ver kv.Version) kv.EncodedKey {
	encoded := codec.EncodeBytes(nil, key)
	encoded = codec.EncodeUintDesc(encoded, ver.Ver)
	return encoded
}
// MvccDecode parses the origin key and version of an encoded key, if the encoded key is a meta key,
// just returns the origin key.
func MvccDecode(encodedKey kv.EncodedKey) (kv.Key, kv.Version, error) {
	// Skip DataPrefix
	remainBytes, key, err := codec.DecodeBytes([]byte(encodedKey))
	if err != nil {
		// should never happen
		return nil, kv.Version{}, errors.Trace(err)
	}
	// if it's meta key
	if len(remainBytes) == 0 {
		return key, kv.Version{}, nil
	}
	var ver uint64
	remainBytes, ver, err = codec.DecodeUintDesc(remainBytes)
	if err != nil {
		// should never happen
		return nil, kv.Version{}, errors.Trace(err)
	}
	// Anything left after the version means the input was malformed.
	if len(remainBytes) != 0 {
		return nil, kv.Version{}, ErrInvalidEncodedKey
	}
	return key, kv.Version{Ver: ver}, nil
}

View File

@@ -0,0 +1,178 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package localstore
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/localstore/engine"
"github.com/pingcap/tidb/terror"
)
var (
	// Compile-time interface checks.
	_ kv.Snapshot = (*dbSnapshot)(nil)
	_ kv.Iterator = (*dbIter)(nil)
)

// dbSnapshot implements MvccSnapshot interface.
type dbSnapshot struct {
	store   *dbStore
	version kv.Version // transaction begin version
}

// newSnapshot creates a snapshot of store pinned at version ver.
func newSnapshot(store *dbStore, ver kv.Version) *dbSnapshot {
	ss := &dbSnapshot{
		store:   store,
		version: ver,
	}
	return ss
}
// mvccSeek seeks for the first key in db which has a k >= key and a version <=
// snapshot's version, returns kv.ErrNotExist if such key is not found. If exact
// is true, only k == key can be returned.
func (s *dbSnapshot) mvccSeek(key kv.Key, exact bool) (kv.Key, []byte, error) {
	// Key layout:
	// ...
	// Key_verMax      -- (1)
	// ...
	// Key_ver+1       -- (2)
	// Key_ver         -- (3)
	// Key_ver-1       -- (4)
	// ...
	// Key_0           -- (5)
	// NextKey_verMax  -- (6)
	// ...
	// NextKey_ver+1   -- (7)
	// NextKey_ver     -- (8)
	// NextKey_ver-1   -- (9)
	// ...
	// NextKey_0       -- (10)
	// ...
	// EOF
	for {
		// Seeking at (key, s.version) lands on the newest version of key
		// that is visible to this snapshot, or on a later key entirely.
		mvccKey := MvccEncodeVersionKey(key, s.version)
		mvccK, v, err := s.store.Seek([]byte(mvccKey)) // search for [3...EOF)
		if err != nil {
			if terror.ErrorEqual(err, engine.ErrNotFound) { // EOF
				return nil, nil, errors.Trace(kv.ErrNotExist)
			}
			return nil, nil, errors.Trace(err)
		}
		k, ver, err := MvccDecode(mvccK)
		if err != nil {
			return nil, nil, errors.Trace(err)
		}
		// quick test for exact mode
		if exact {
			if key.Cmp(k) != 0 || isTombstone(v) {
				return nil, nil, errors.Trace(kv.ErrNotExist)
			}
			return k, v, nil
		}
		if ver.Ver > s.version.Ver {
			// currently on [6...7]
			key = k // search for [8...EOF) next loop
			continue
		}
		// currently on [3...5] or [8...10]
		if isTombstone(v) {
			key = k.Next() // search for (5...EOF) or (10..EOF) next loop
			continue
		}
		// target found
		return k, v, nil
	}
}
// Get returns the value of key visible at the snapshot's version, or
// kv.ErrNotExist when the key is absent or deleted.
func (s *dbSnapshot) Get(key kv.Key) ([]byte, error) {
	_, v, err := s.mvccSeek(key, true)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return v, nil
}

// BatchGet fetches each key individually and returns the found entries
// keyed by the raw key string; missing keys are simply omitted from the
// result map.
func (s *dbSnapshot) BatchGet(keys []kv.Key) (map[string][]byte, error) {
	m := make(map[string][]byte)
	for _, k := range keys {
		v, err := s.Get(k)
		if err != nil && !kv.IsErrNotFound(err) {
			return nil, errors.Trace(err)
		}
		if len(v) > 0 {
			m[string(k)] = v
		}
	}
	return m, nil
}
// Seek returns an iterator positioned at the first key >= k that is
// visible in this snapshot.
func (s *dbSnapshot) Seek(k kv.Key) (kv.Iterator, error) {
	iter, err := newDBIter(s, k)
	return iter, errors.Trace(err)
}

// Release implements kv.Snapshot; snapshots hold no resources to free.
func (s *dbSnapshot) Release() {
}
// dbIter iterates over the visible version of each key in a snapshot.
type dbIter struct {
	s     *dbSnapshot
	valid bool // false once the iterator has moved past the last key
	k     kv.Key
	v     []byte
}

// newDBIter creates an iterator positioned at the first visible key
// >= startKey; an exhausted (invalid) iterator is returned when no such
// key exists.
func newDBIter(s *dbSnapshot, startKey kv.Key) (*dbIter, error) {
	k, v, err := s.mvccSeek(startKey, false)
	if err != nil {
		if terror.ErrorEqual(err, kv.ErrNotExist) {
			// Running off the end is not an error, just an invalid iterator.
			err = nil
		}
		return &dbIter{valid: false}, errors.Trace(err)
	}

	return &dbIter{
		s:     s,
		valid: true,
		k:     k,
		v:     v,
	}, nil
}
// Next advances the iterator to the next visible key. Reaching the end
// marks the iterator invalid without reporting an error; any other seek
// failure is returned.
func (it *dbIter) Next() error {
	k, v, err := it.s.mvccSeek(it.k.Next(), false)
	if err != nil {
		it.valid = false
		if !terror.ErrorEqual(err, kv.ErrNotExist) {
			return errors.Trace(err)
		}
	}
	it.k, it.v = k, v
	return nil
}

// Valid reports whether the iterator is positioned on an entry.
func (it *dbIter) Valid() bool {
	return it.valid
}

// Key returns the current key.
func (it *dbIter) Key() kv.Key {
	return it.k
}

// Value returns the current value.
func (it *dbIter) Value() []byte {
	return it.v
}

// Close implements kv.Iterator; there is nothing to release.
func (it *dbIter) Close() {}

160
vendor/github.com/pingcap/tidb/store/localstore/txn.go generated vendored Normal file
View File

@@ -0,0 +1,160 @@
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package localstore
import (
"fmt"
"github.com/juju/errors"
"github.com/ngaut/log"
"github.com/pingcap/tidb/kv"
)
var (
	// Compile-time check that *dbTxn satisfies kv.Transaction.
	_ kv.Transaction = (*dbTxn)(nil)
)

// dbTxn is not thread safe
type dbTxn struct {
	us         kv.UnionStore
	store      *dbStore // for commit
	tid        uint64
	valid      bool
	version    kv.Version          // commit version
	lockedKeys map[string]struct{} // origin version in snapshot
	dirty      bool                // set once the txn buffers a write
}

// newTxn creates a transaction whose reads come from a snapshot taken at
// ver and whose tid is ver.Ver.
func newTxn(s *dbStore, ver kv.Version) *dbTxn {
	txn := &dbTxn{
		us:         kv.NewUnionStore(newSnapshot(s, ver)),
		store:      s,
		tid:        ver.Ver,
		valid:      true,
		version:    kv.MinVersion,
		lockedKeys: make(map[string]struct{}),
	}
	log.Debugf("[kv] Begin txn:%d", txn.tid)
	return txn
}
// Implement transaction interface

// Get reads k through the union store.
func (txn *dbTxn) Get(k kv.Key) ([]byte, error) {
	log.Debugf("[kv] get key:%q, txn:%d", k, txn.tid)
	return txn.us.Get(k)
}

// Set buffers a write of data under k and marks the transaction dirty.
func (txn *dbTxn) Set(k kv.Key, data []byte) error {
	log.Debugf("[kv] set key:%q, txn:%d", k, txn.tid)
	txn.dirty = true
	return txn.us.Set(k, data)
}

// String returns the transaction id, used in logs.
func (txn *dbTxn) String() string {
	return fmt.Sprintf("%d", txn.tid)
}

// Seek returns an iterator over the union store starting at k.
func (txn *dbTxn) Seek(k kv.Key) (kv.Iterator, error) {
	log.Debugf("[kv] seek key:%q, txn:%d", k, txn.tid)
	return txn.us.Seek(k)
}

// Delete buffers a deletion of k and marks the transaction dirty.
func (txn *dbTxn) Delete(k kv.Key) error {
	log.Debugf("[kv] delete key:%q, txn:%d", k, txn.tid)
	txn.dirty = true
	return txn.us.Delete(k)
}

// SetOption forwards opt/val to the union store.
func (txn *dbTxn) SetOption(opt kv.Option, val interface{}) {
	txn.us.SetOption(opt, val)
}

// DelOption clears opt on the union store.
func (txn *dbTxn) DelOption(opt kv.Option) {
	txn.us.DelOption(opt)
}
// doCommit validates lazy condition pairs, registers every buffered key
// for locking, and hands the transaction to the store for the actual
// write.
func (txn *dbTxn) doCommit() error {
	// check lazy condition pairs
	if err := txn.us.CheckLazyConditionPairs(); err != nil {
		return errors.Trace(err)
	}

	err := txn.us.WalkBuffer(func(k kv.Key, v []byte) error {
		e := txn.LockKeys(k)
		return errors.Trace(e)
	})
	if err != nil {
		return errors.Trace(err)
	}

	return txn.store.CommitTxn(txn)
}

// Commit publishes the transaction's writes. The transaction is closed
// afterwards whether or not the commit succeeded.
func (txn *dbTxn) Commit() error {
	if !txn.valid {
		return errors.Trace(kv.ErrInvalidTxn)
	}
	log.Debugf("[kv] commit txn %d", txn.tid)
	defer func() {
		txn.close()
	}()

	return errors.Trace(txn.doCommit())
}
// close invalidates the transaction and releases its union store.
func (txn *dbTxn) close() error {
	txn.valid = false
	txn.lockedKeys = nil
	txn.us.Release()
	return nil
}

// Rollback abandons the transaction without publishing its writes.
func (txn *dbTxn) Rollback() error {
	if !txn.valid {
		return errors.Trace(kv.ErrInvalidTxn)
	}
	log.Warnf("[kv] Rollback txn %d", txn.tid)
	return txn.close()
}
// LockKeys registers keys to be locked at commit time; the actual locks
// are taken by the store scheduler inside CommitTxn.
func (txn *dbTxn) LockKeys(keys ...kv.Key) error {
	for _, key := range keys {
		txn.lockedKeys[string(key)] = struct{}{}
	}
	return nil
}

// IsReadOnly reports whether the transaction has buffered no writes.
func (txn *dbTxn) IsReadOnly() bool {
	return !txn.dirty
}

// StartTS returns the transaction's begin version as an int64.
func (txn *dbTxn) StartTS() int64 {
	return int64(txn.tid)
}

// GetClient implements kv.Transaction; the local store has no client.
func (txn *dbTxn) GetClient() kv.Client {
	return nil
}

// dbClient is a stub kv.Client for the local store.
type dbClient struct {
}

// SupportRequestType reports that no pushed-down request types are
// supported by the local store.
func (c *dbClient) SupportRequestType(reqType, subType int64) bool {
	return false
}

// Send is a stub; the local store cannot serve distributed requests.
func (c *dbClient) Send(req *kv.Request) kv.Response {
	return nil
}