diff --git a/circle.yml b/circle.yml index 3dba976b..104cfa6f 100644 --- a/circle.yml +++ b/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && bash ./test.sh + - cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/db/backend_test.go b/db/backend_test.go index b4ffecdc..3362fecf 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -2,42 +2,80 @@ package db import ( "fmt" + "os" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" cmn "github.com/tendermint/tmlibs/common" ) -func testBackend(t *testing.T, backend string) { +func cleanupDBDir(dir, name string) { + os.RemoveAll(filepath.Join(dir, name) + ".db") +} + +func testBackendGetSetDelete(t *testing.T, backend string) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) - require.Nil(t, db.Get([]byte(""))) - require.Nil(t, db.Get(nil)) + + key := []byte("abc") + require.Nil(t, db.Get(key)) // Set empty ("") - db.Set([]byte(""), []byte("")) - require.NotNil(t, db.Get([]byte(""))) - require.NotNil(t, db.Get(nil)) - require.Empty(t, db.Get([]byte(""))) - require.Empty(t, db.Get(nil)) + db.Set(key, []byte("")) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) // Set empty (nil) - db.Set([]byte(""), nil) - require.NotNil(t, db.Get([]byte(""))) - require.NotNil(t, db.Get(nil)) - require.Empty(t, db.Get([]byte(""))) - require.Empty(t, db.Get(nil)) + db.Set(key, nil) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) // Delete - db.Delete([]byte("")) - require.Nil(t, db.Get([]byte(""))) - require.Nil(t, db.Get(nil)) + db.Delete(key) + require.Nil(t, db.Get(key)) } -func TestBackends(t *testing.T) { - 
testBackend(t, CLevelDBBackendStr) - testBackend(t, GoLevelDBBackendStr) - testBackend(t, MemDBBackendStr) +func TestBackendsGetSetDelete(t *testing.T) { + for dbType, _ := range backends { + testBackendGetSetDelete(t, dbType) + } +} + +func withDB(t *testing.T, creator dbCreator, fn func(DB)) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + defer cleanupDBDir("", name) + assert.Nil(t, err) + fn(db) + db.Close() +} + +func TestBackendsNilKeys(t *testing.T) { + // test all backends + for dbType, creator := range backends { + withDB(t, creator, func(db DB) { + panicMsg := "expecting %s.%s to panic" + assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") + assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") + assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") + assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") + assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") + assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") + }) + } +} + +func TestGoLevelDBBackendStr(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer cleanupDBDir("", name) + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db.go b/db/c_level_db.go index e4450aaa..60198d84 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -3,8 +3,9 @@ package db import ( + "bytes" "fmt" - "path" + "path/filepath" "github.com/jmhodges/levigo" ) @@ -17,17 +18,17 @@ func init() { registerDBCreator(CLevelDBBackendStr, dbCreator, false) } +var _ DB = (*CLevelDB)(nil) + type CLevelDB struct { db *levigo.DB ro *levigo.ReadOptions wo *levigo.WriteOptions woSync *levigo.WriteOptions - - cwwMutex } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { - dbPath := path.Join(dir, name+".db") + dbPath := 
filepath.Join(dir, name+".db") opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1 << 30)) @@ -45,13 +46,12 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { ro: ro, wo: wo, woSync: woSync, - - cwwMutex: NewCWWMutex(), } return database, nil } func (db *CLevelDB) Get(key []byte) []byte { + panicNilKey(key) res, err := db.db.Get(db.ro, key) if err != nil { panic(err) @@ -59,7 +59,13 @@ func (db *CLevelDB) Get(key []byte) []byte { return res } +func (db *CLevelDB) Has(key []byte) bool { + panicNilKey(key) + panic("not implemented yet") +} + func (db *CLevelDB) Set(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(db.wo, key, value) if err != nil { panic(err) @@ -67,6 +73,7 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } func (db *CLevelDB) SetSync(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(db.woSync, key, value) if err != nil { panic(err) @@ -74,6 +81,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } func (db *CLevelDB) Delete(key []byte) { + panicNilKey(key) err := db.db.Delete(db.wo, key) if err != nil { panic(err) @@ -81,6 +89,7 @@ func (db *CLevelDB) Delete(key []byte) { } func (db *CLevelDB) DeleteSync(key []byte) { + panicNilKey(key) err := db.db.Delete(db.woSync, key) if err != nil { panic(err) @@ -99,9 +108,9 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) defer itr.Close() - for itr.Seek(nil); itr.Valid(); itr.Next() { + for ; itr.Valid(); itr.Next() { key := itr.Key() value := itr.Value() fmt.Printf("[%X]:\t[%X]\n", key, value) @@ -120,10 +129,6 @@ func (db *CLevelDB) Stats() map[string]string { return stats } -func (db *CLevelDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- // Batch @@ -155,59 +160,93 @@ func (mBatch *cLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db 
*CLevelDB) Iterator() Iterator { +func (db *CLevelDB) Iterator(start, end []byte) Iterator { itr := db.db.NewIterator(db.ro) - itr.Seek([]byte{0x00}) - return cLevelDBIterator{itr} + return newCLevelDBIterator(itr, start, end) } +func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil +} + +var _ Iterator = (*cLevelDBIterator)(nil) + type cLevelDBIterator struct { - itr *levigo.Iterator + itr *levigo.Iterator + start, end []byte + invalid bool } -func (c cLevelDBIterator) Seek(key []byte) { - if key == nil { - key = []byte{0x00} +func newCLevelDBIterator(itr *levigo.Iterator, start, end []byte) *cLevelDBIterator { + + if len(start) > 0 { + itr.Seek(start) + } else { + itr.SeekToFirst() + } + + return &cLevelDBIterator{ + itr: itr, + start: start, + end: end, } - c.itr.Seek(key) } -func (c cLevelDBIterator) Valid() bool { - return c.itr.Valid() +func (c *cLevelDBIterator) Domain() ([]byte, []byte) { + return c.start, c.end } -func (c cLevelDBIterator) Key() []byte { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Valid() bool { + c.assertNoError() + if c.invalid { + return false + } + c.invalid = !c.itr.Valid() + return !c.invalid +} + +func (c *cLevelDBIterator) Key() []byte { + if !c.Valid() { panic("cLevelDBIterator Key() called when invalid") } return c.itr.Key() } -func (c cLevelDBIterator) Value() []byte { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Value() []byte { + if !c.Valid() { panic("cLevelDBIterator Value() called when invalid") } return c.itr.Value() } -func (c cLevelDBIterator) Next() { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Next() { + if !c.Valid() { panic("cLevelDBIterator Next() called when invalid") } c.itr.Next() + c.checkEndKey() // if we've exceeded the range, we're now invalid } -func (c cLevelDBIterator) Prev() { +// levigo has no upper bound when iterating, so need to check ourselves +func (c *cLevelDBIterator) checkEndKey() { if !c.itr.Valid() { - panic("cLevelDBIterator Prev() called when 
invalid") + c.invalid = true + return + } + + key := c.itr.Key() + if c.end != nil && bytes.Compare(key, c.end) > 0 { + c.invalid = true } - c.itr.Prev() } -func (c cLevelDBIterator) Close() { +func (c *cLevelDBIterator) Close() { c.itr.Close() } -func (c cLevelDBIterator) GetError() error { - return c.itr.GetError() +func (c *cLevelDBIterator) assertNoError() { + if err := c.itr.GetError(); err != nil { + panic(err) + } } diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 86436233..89993fba 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -7,6 +7,7 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/assert" cmn "github.com/tendermint/tmlibs/common" ) @@ -84,3 +85,12 @@ func bytes2Int64(buf []byte) int64 { return int64(binary.BigEndian.Uint64(buf)) } */ + +func TestCLevelDBBackendStr(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer cleanupDBDir("", name) + + _, ok := db.(*CLevelDB) + assert.True(t, ok) +} diff --git a/db/cache_db.go b/db/cache_db.go deleted file mode 100644 index ed85e305..00000000 --- a/db/cache_db.go +++ /dev/null @@ -1,231 +0,0 @@ -package db - -import ( - "fmt" - "sort" - "sync" - "sync/atomic" -) - -// If value is nil but deleted is false, -// it means the parent doesn't have the key. -// (No need to delete upon Write()) -type cDBValue struct { - value []byte - deleted bool - dirty bool -} - -// cacheDB wraps an in-memory cache around an underlying DB. -type cacheDB struct { - mtx sync.Mutex - cache map[string]cDBValue - parent DB - lockVersion interface{} - - cwwMutex -} - -// Needed by MultiStore.CacheWrap(). -var _ atomicSetDeleter = (*cacheDB)(nil) -var _ CacheDB = (*cacheDB)(nil) - -// Users should typically not be required to call NewCacheDB directly, as the -// DB implementations here provide a .CacheDB() function already. -// `lockVersion` is typically provided by parent.GetWriteLockVersion(). 
-func NewCacheDB(parent DB, lockVersion interface{}) CacheDB { - db := &cacheDB{ - cache: make(map[string]cDBValue), - parent: parent, - lockVersion: lockVersion, - cwwMutex: NewCWWMutex(), - } - return db -} - -func (db *cacheDB) Get(key []byte) []byte { - db.mtx.Lock() - defer db.mtx.Unlock() - - dbValue, ok := db.cache[string(key)] - if !ok { - data := db.parent.Get(key) - dbValue = cDBValue{value: data, deleted: false, dirty: false} - db.cache[string(key)] = dbValue - } - return dbValue.value -} - -func (db *cacheDB) Set(key []byte, value []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) -} - -func (db *cacheDB) SetSync(key []byte, value []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) -} - -func (db *cacheDB) SetNoLock(key []byte, value []byte) { - db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} -} - -func (db *cacheDB) Delete(key []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) -} - -func (db *cacheDB) DeleteSync(key []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) -} - -func (db *cacheDB) DeleteNoLock(key []byte) { - db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} -} - -func (db *cacheDB) Close() { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.parent.Close() -} - -func (db *cacheDB) Print() { - db.mtx.Lock() - defer db.mtx.Unlock() - - fmt.Println("cacheDB\ncache:") - for key, value := range db.cache { - fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) - } - fmt.Println("\nparent:") - db.parent.Print() -} - -func (db *cacheDB) Stats() map[string]string { - db.mtx.Lock() - defer db.mtx.Unlock() - - stats := make(map[string]string) - stats["cache.size"] = fmt.Sprintf("%d", len(db.cache)) - stats["cache.lock_version"] = fmt.Sprintf("%v", db.lockVersion) - mergeStats(db.parent.Stats(), stats, "parent.") - return stats -} - -func (db *cacheDB) Iterator() Iterator { - panic("cacheDB.Iterator() not yet 
supported") -} - -func (db *cacheDB) NewBatch() Batch { - return &memBatch{db, nil} -} - -// Implements `atomicSetDeleter` for Batch support. -func (db *cacheDB) Mutex() *sync.Mutex { - return &(db.mtx) -} - -// Write writes pending updates to the parent database and clears the cache. -func (db *cacheDB) Write() { - db.mtx.Lock() - defer db.mtx.Unlock() - - // Optional sanity check to ensure that cacheDB is valid - if parent, ok := db.parent.(WriteLocker); ok { - if parent.TryWriteLock(db.lockVersion) { - // All good! - } else { - panic("cacheDB.Write() failed. Did this CacheDB expire?") - } - } - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - keys := make([]string, 0, len(db.cache)) - for key, dbValue := range db.cache { - if dbValue.dirty { - keys = append(keys, key) - } - } - sort.Strings(keys) - - batch := db.parent.NewBatch() - for _, key := range keys { - dbValue := db.cache[key] - if dbValue.deleted { - batch.Delete([]byte(key)) - } else if dbValue.value == nil { - // Skip, it already doesn't exist in parent. - } else { - batch.Set([]byte(key), dbValue.value) - } - } - batch.Write() - - // Clear the cache - db.cache = make(map[string]cDBValue) -} - -//---------------------------------------- -// To cache-wrap this cacheDB further. - -func (db *cacheDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - -// If the parent parent DB implements this, (e.g. such as a cacheDB parent to a -// cacheDB child), cacheDB will call `parent.TryWriteLock()` before attempting -// to write. -type WriteLocker interface { - GetWriteLockVersion() (lockVersion interface{}) - TryWriteLock(lockVersion interface{}) bool -} - -// Implements TryWriteLocker. Embed this in DB structs if desired. -type cwwMutex struct { - mtx sync.Mutex - // CONTRACT: reading/writing to `*written` should use `atomic.*`. - // CONTRACT: replacing `written` with another *int32 should use `.mtx`. 
- written *int32 -} - -func NewCWWMutex() cwwMutex { - return cwwMutex{ - written: new(int32), - } -} - -func (cww *cwwMutex) GetWriteLockVersion() interface{} { - cww.mtx.Lock() - defer cww.mtx.Unlock() - - // `written` works as a "version" object because it gets replaced upon - // successful TryWriteLock. - return cww.written -} - -func (cww *cwwMutex) TryWriteLock(version interface{}) bool { - cww.mtx.Lock() - defer cww.mtx.Unlock() - - if version != cww.written { - return false // wrong "WriteLockVersion" - } - if !atomic.CompareAndSwapInt32(cww.written, 0, 1) { - return false // already written - } - - // New "WriteLockVersion" - cww.written = new(int32) - return true -} diff --git a/db/cache_db_test.go b/db/cache_db_test.go deleted file mode 100644 index 2a2684fe..00000000 --- a/db/cache_db_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func bz(s string) []byte { return []byte(s) } - -func TestCacheDB(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - - require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") - - mem.Set(bz("key1"), bz("value1")) - cdb.Set(bz("key1"), bz("value1")) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - - cdb.Set(bz("key1"), bz("value2")) - require.Equal(t, bz("value2"), cdb.Get(bz("key1"))) - require.Equal(t, bz("value1"), mem.Get(bz("key1"))) - - cdb.Write() - require.Equal(t, bz("value2"), mem.Get(bz("key1"))) - - require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") - - cdb = mem.CacheDB() - cdb.Delete(bz("key1")) - require.Empty(t, cdb.Get(bz("key1"))) - require.Equal(t, mem.Get(bz("key1")), bz("value2")) - - cdb.Write() - require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") - require.Empty(t, mem.Get(bz("key1")), "Expected `key1` to be empty") -} - -func TestCacheDBWriteLock(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - require.NotPanics(t, func() { cdb.Write() }) - 
require.Panics(t, func() { cdb.Write() }) - cdb = mem.CacheDB() - require.NotPanics(t, func() { cdb.Write() }) - require.Panics(t, func() { cdb.Write() }) -} - -func TestCacheDBWriteLockNested(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - cdb2 := cdb.CacheDB() - require.NotPanics(t, func() { cdb2.Write() }) - require.Panics(t, func() { cdb2.Write() }) - cdb2 = cdb.CacheDB() - require.NotPanics(t, func() { cdb2.Write() }) - require.Panics(t, func() { cdb2.Write() }) -} - -func TestCacheDBNested(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - cdb.Set(bz("key1"), bz("value1")) - - require.Empty(t, mem.Get(bz("key1"))) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - cdb2 := cdb.CacheDB() - require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) - - cdb2.Set(bz("key1"), bz("VALUE2")) - require.Equal(t, []byte(nil), mem.Get(bz("key1"))) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - require.Equal(t, bz("VALUE2"), cdb2.Get(bz("key1"))) - - cdb2.Write() - require.Equal(t, []byte(nil), mem.Get(bz("key1"))) - require.Equal(t, bz("VALUE2"), cdb.Get(bz("key1"))) - - cdb.Write() - require.Equal(t, bz("VALUE2"), mem.Get(bz("key1"))) - -} diff --git a/db/common_test.go b/db/common_test.go index 505864c2..6b300979 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -5,34 +5,25 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tmlibs/common" ) func checkValid(t *testing.T, itr Iterator, expected bool) { valid := itr.Valid() - assert.Equal(t, expected, valid) + require.Equal(t, expected, valid) } func checkNext(t *testing.T, itr Iterator, expected bool) { itr.Next() valid := itr.Valid() - assert.Equal(t, expected, valid) + require.Equal(t, expected, valid) } func checkNextPanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") } -func checkPrevPanics(t *testing.T, itr Iterator) { - 
assert.Panics(t, func() { itr.Prev() }, "checkPrevPanics expected panic but didn't") -} - -func checkPrev(t *testing.T, itr Iterator, expected bool) { - itr.Prev() - valid := itr.Valid() - assert.Equal(t, expected, valid) -} - func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { k, v := itr.Key(), itr.Value() assert.Exactly(t, key, k) @@ -44,7 +35,6 @@ func checkInvalid(t *testing.T, itr Iterator) { checkKeyPanics(t, itr) checkValuePanics(t, itr) checkNextPanics(t, itr) - checkPrevPanics(t, itr) } func checkKeyPanics(t *testing.T, itr Iterator) { @@ -67,7 +57,7 @@ func TestDBIteratorSingleKey(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) checkNext(t, itr, false) @@ -88,17 +78,9 @@ func TestDBIteratorTwoKeys(t *testing.T) { db.SetSync(bz("2"), bz("value_1")) { // Fail by calling Next too much - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) - for i := 0; i < 10; i++ { - checkNext(t, itr, true) - checkValid(t, itr, true) - - checkPrev(t, itr, true) - checkValid(t, itr, true) - } - checkNext(t, itr, true) checkValid(t, itr, true) @@ -110,27 +92,6 @@ func TestDBIteratorTwoKeys(t *testing.T) { // Once invalid... checkInvalid(t, itr) } - - { // Fail by calling Prev too much - itr := db.Iterator() - checkValid(t, itr, true) - - for i := 0; i < 10; i++ { - checkNext(t, itr, true) - checkValid(t, itr, true) - - checkPrev(t, itr, true) - checkValid(t, itr, true) - } - - checkPrev(t, itr, false) - checkValid(t, itr, false) - - checkPrevPanics(t, itr) - - // Once invalid... 
- checkInvalid(t, itr) - } }) } } @@ -139,32 +100,30 @@ func TestDBIteratorEmpty(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkInvalid(t, itr) }) } } -func TestDBIteratorEmptySeek(t *testing.T) { +func TestDBIteratorEmptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator() - itr.Seek(bz("1")) + itr := db.Iterator(bz("1"), EndingKey()) checkInvalid(t, itr) }) } } -func TestDBIteratorBadSeek(t *testing.T) { +func TestDBIteratorNonemptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator() - itr.Seek(bz("2")) + itr := db.Iterator(bz("2"), EndingKey()) checkInvalid(t, itr) }) diff --git a/db/db.go b/db/db.go index 08ebaeaa..b43b0655 100644 --- a/db/db.go +++ b/db/db.go @@ -1,98 +1,12 @@ package db -import . "github.com/tendermint/tmlibs/common" - -type DB interface { - Get([]byte) []byte // NOTE: returns nil iff never set or deleted. - Set([]byte, []byte) - SetSync([]byte, []byte) - Delete([]byte) - DeleteSync([]byte) - Close() - NewBatch() Batch - Iterator() Iterator - - // For debugging - Print() - - // Stats returns a map of property values for all keys and the size of the cache. - Stats() map[string]string - - // CacheDB wraps the DB w/ a cache. - CacheDB() CacheDB -} - -type CacheDB interface { - DB - Write() // Write to the underlying DB -} - -type SetDeleter interface { - Set(key, value []byte) - Delete(key []byte) -} - -type Batch interface { - SetDeleter - Write() -} - -/* - Usage: - - for itr.Seek(mykey); itr.Valid(); itr.Next() { - k, v := itr.Key(); itr.Value() - .... 
- } -*/ -type Iterator interface { - - // Seek moves the iterator the position of the key given or, if the key - // doesn't exist, the next key that does exist in the database. If the key - // doesn't exist, and there is no next key, the Iterator becomes invalid. - Seek(key []byte) - - // Valid returns false only when an Iterator has iterated past either the - // first or the last key in the database. - Valid() bool - - // Next moves the iterator to the next sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Next() - - // Prev moves the iterator to the previous sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Prev() - - // Key returns the key of the cursor. - // - // If Valid returns false, this method will panic. - Key() []byte - - // Value returns the key of the cursor. - // - // If Valid returns false, this method will panic. - Value() []byte - - // GetError returns an IteratorError from LevelDB if it had one during - // iteration. - // - // This method is safe to call when Valid returns false. - GetError() error - - // Close deallocates the given Iterator. - Close() -} +import "fmt" //----------------------------------------------------------------------------- // Main entry const ( - LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. 
+ LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb unless +gcc CLevelDBBackendStr = "cleveldb" GoLevelDBBackendStr = "goleveldb" MemDBBackendStr = "memdb" @@ -114,7 +28,7 @@ func registerDBCreator(backend string, creator dbCreator, force bool) { func NewDB(name string, backend string, dir string) DB { db, err := backends[backend](name, dir) if err != nil { - PanicSanity(Fmt("Error initializing DB: %v", err)) + panic(fmt.Sprintf("Error initializing DB: %v", err)) } return db } diff --git a/db/fsdb.go b/db/fsdb.go index 4b191445..056cc398 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -5,12 +5,12 @@ import ( "io/ioutil" "net/url" "os" - "path" "path/filepath" "sort" "sync" "github.com/pkg/errors" + cmn "github.com/tendermint/tmlibs/common" ) const ( @@ -25,12 +25,12 @@ func init() { }, false) } +var _ DB = (*FSDB)(nil) + // It's slow. type FSDB struct { mtx sync.Mutex dir string - - cwwMutex } func NewFSDB(dir string) *FSDB { @@ -39,8 +39,7 @@ func NewFSDB(dir string) *FSDB { panic(errors.Wrap(err, "Creating FSDB dir "+dir)) } database := &FSDB{ - dir: dir, - cwwMutex: NewCWWMutex(), + dir: dir, } return database } @@ -48,20 +47,31 @@ func NewFSDB(dir string) *FSDB { func (db *FSDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) path := db.nameToPath(key) value, err := read(path) if os.IsNotExist(err) { return nil } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) } return value } +func (db *FSDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + panicNilKey(key) + + path := db.nameToPath(key) + return cmn.FileExists(path) +} + func (db *FSDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.SetNoLock(key, value) } @@ -69,25 +79,28 @@ func (db *FSDB) Set(key []byte, value []byte) { func (db *FSDB) SetSync(key []byte, value []byte) { db.mtx.Lock() 
defer db.mtx.Unlock() + panicNilKey(key) db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) SetNoLock(key []byte, value []byte) { + panicNilKey(key) if value == nil { value = []byte{} } path := db.nameToPath(key) err := write(path, value) if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Setting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key)) } } func (db *FSDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.DeleteNoLock(key) } @@ -95,17 +108,20 @@ func (db *FSDB) Delete(key []byte) { func (db *FSDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) DeleteNoLock(key []byte) { - err := remove(string(key)) + panicNilKey(key) + path := db.nameToPath(key) + err := remove(path) if os.IsNotExist(err) { return } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Removing key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key)) } } @@ -140,32 +156,31 @@ func (db *FSDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *FSDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - -func (db *FSDB) Iterator() Iterator { - it := newMemDBIterator() - it.db = db - it.cur = 0 +func (db *FSDB) Iterator(start, end []byte) Iterator { + it := newMemDBIterator(db, start, end) db.mtx.Lock() defer db.mtx.Unlock() // We need a copy of all of the keys. // Not the best, but probably not a bottleneck depending. 
- keys, err := list(db.dir) + keys, err := list(db.dir, start, end) if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } sort.Strings(keys) it.keys = keys return it } +func (db *FSDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil +} + func (db *FSDB) nameToPath(name []byte) string { n := url.PathEscape(string(name)) - return path.Join(db.dir, n) + return filepath.Join(db.dir, n) } // Read some bytes to a file. @@ -187,7 +202,7 @@ func read(path string) ([]byte, error) { // Write some bytes from a file. // CONTRACT: returns os errors directly without wrapping. func write(path string, d []byte) error { - f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, keyPerm) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm) if err != nil { return err } @@ -209,7 +224,7 @@ func remove(path string) error { // List files of a path. // Paths will NOT include dir as the prefix. // CONTRACT: returns os errors directly without wrapping. 
-func list(dirPath string) (paths []string, err error) { +func list(dirPath string, start, end []byte) ([]string, error) { dir, err := os.Open(dirPath) if err != nil { return nil, err @@ -220,12 +235,15 @@ func list(dirPath string) (paths []string, err error) { if err != nil { return nil, err } - for i, name := range names { + var paths []string + for _, name := range names { n, err := url.PathUnescape(name) if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - names[i] = n + if IsKeyInDomain([]byte(n), start, end) { + paths = append(paths, n) + } } - return names, nil + return paths, nil } diff --git a/db/go_level_db.go b/db/go_level_db.go index cffe7329..45cb0498 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -2,12 +2,13 @@ package db import ( "fmt" - "path" + "path/filepath" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" . 
"github.com/tendermint/tmlibs/common" ) @@ -20,26 +21,26 @@ func init() { registerDBCreator(GoLevelDBBackendStr, dbCreator, false) } +var _ DB = (*GoLevelDB)(nil) + type GoLevelDB struct { db *leveldb.DB - - cwwMutex } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { - dbPath := path.Join(dir, name+".db") + dbPath := filepath.Join(dir, name+".db") db, err := leveldb.OpenFile(dbPath, nil) if err != nil { return nil, err } database := &GoLevelDB{ - db: db, - cwwMutex: NewCWWMutex(), + db: db, } return database, nil } func (db *GoLevelDB) Get(key []byte) []byte { + panicNilKey(key) res, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { @@ -51,7 +52,21 @@ func (db *GoLevelDB) Get(key []byte) []byte { return res } +func (db *GoLevelDB) Has(key []byte) bool { + panicNilKey(key) + _, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return false + } else { + PanicCrisis(err) + } + } + return true +} + func (db *GoLevelDB) Set(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(key, value, nil) if err != nil { PanicCrisis(err) @@ -59,6 +74,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } func (db *GoLevelDB) SetSync(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -66,6 +82,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } func (db *GoLevelDB) Delete(key []byte) { + panicNilKey(key) err := db.db.Delete(key, nil) if err != nil { PanicCrisis(err) @@ -73,6 +90,7 @@ func (db *GoLevelDB) Delete(key []byte) { } func (db *GoLevelDB) DeleteSync(key []byte) { + panicNilKey(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -121,10 +139,6 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } -func (db *GoLevelDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - 
//---------------------------------------- // Batch @@ -156,17 +170,46 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db *GoLevelDB) Iterator() Iterator { - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), +// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +func goLevelDBIterRange(start, end []byte) *util.Range { + // XXX: what if start == nil ? + if len(start) == 0 { + start = nil + } + return &util.Range{ + Start: start, + Limit: end, } - itr.Seek(nil) - return itr } +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itrRange := goLevelDBIterRange(start, end) + itr := db.db.NewIterator(itrRange, nil) + itr.Seek(start) // if we don't call this the itr is never valid (?!) + return &goLevelDBIterator{ + source: itr, + start: start, + end: end, + } +} + +func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil +} + +var _ Iterator = (*goLevelDBIterator)(nil) + type goLevelDBIterator struct { - source iterator.Iterator - invalid bool + source iterator.Iterator + invalid bool + start, end []byte +} + +func (it *goLevelDBIterator) Domain() ([]byte, []byte) { + return it.start, it.end } // Key returns a copy of the current key. 
@@ -193,15 +236,8 @@ func (it *goLevelDBIterator) Value() []byte { return v } -func (it *goLevelDBIterator) GetError() error { - return it.source.Error() -} - -func (it *goLevelDBIterator) Seek(key []byte) { - it.source.Seek(key) -} - func (it *goLevelDBIterator) Valid() bool { + it.assertNoError() if it.invalid { return false } @@ -226,3 +262,9 @@ func (it *goLevelDBIterator) Prev() { func (it *goLevelDBIterator) Close() { it.source.Release() } + +func (it *goLevelDBIterator) assertNoError() { + if err := it.source.Error(); err != nil { + panic(err) + } +} diff --git a/db/mem_db.go b/db/mem_db.go index f5d55f3a..44254870 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -1,7 +1,6 @@ package db import ( - "bytes" "fmt" "sort" "sync" @@ -13,17 +12,16 @@ func init() { }, false) } +var _ DB = (*MemDB)(nil) + type MemDB struct { mtx sync.Mutex db map[string][]byte - - cwwMutex } func NewMemDB() *MemDB { database := &MemDB{ - db: make(map[string][]byte), - cwwMutex: NewCWWMutex(), + db: make(map[string][]byte), } return database } @@ -31,21 +29,29 @@ func NewMemDB() *MemDB { func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) return db.db[string(key)] } +func (db *MemDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + panicNilKey(key) + _, ok := db.db[string(key)] + return ok +} + func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) db.SetNoLock(key, value) } @@ -54,25 +60,27 @@ func (db *MemDB) SetNoLock(key []byte, value []byte) { if value == nil { value = []byte{} } + panicNilKey(key) db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) delete(db.db, string(key)) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + 
panicNilKey(key)
 	delete(db.db, string(key))
 }
 
 // NOTE: Implements atomicSetDeleter
 func (db *MemDB) DeleteNoLock(key []byte) {
+	panicNilKey(key)
 	delete(db.db, string(key))
 }
 
@@ -114,47 +122,68 @@ func (db *MemDB) Mutex() *sync.Mutex {
 	return &(db.mtx)
 }
 
-func (db *MemDB) CacheDB() CacheDB {
-	return NewCacheDB(db, db.GetWriteLockVersion())
-}
-
 //----------------------------------------
 
-func (db *MemDB) Iterator() Iterator {
-	it := newMemDBIterator()
-	it.db = db
-	it.cur = 0
+func (db *MemDB) Iterator(start, end []byte) Iterator {
+	it := newMemDBIterator(db, start, end)
 
 	db.mtx.Lock()
 	defer db.mtx.Unlock()
 
 	// We need a copy of all of the keys.
 	// Not the best, but probably not a bottleneck depending.
-	for key, _ := range db.db {
-		it.keys = append(it.keys, key)
-	}
-	sort.Strings(it.keys)
+	it.keys = db.getSortedKeys(start, end)
 
 	return it
 }
 
-type memDBIterator struct {
-	cur  int
-	keys []string
-	db   DB
+func (db *MemDB) ReverseIterator(start, end []byte) Iterator {
+	it := newMemDBIterator(db, start, end)
+
+	db.mtx.Lock()
+	defer db.mtx.Unlock()
+
+	// We need a copy of all of the keys.
+	// Not the best, but probably not a bottleneck depending.
+	it.keys = db.getSortedKeys(end, start)
+	// reverse the order; swap only through the midpoint,
+	// otherwise the second half undoes the first half's swaps
+	l := len(it.keys) - 1
+	for i := 0; i < len(it.keys)/2; i++ {
+		it.keys[i], it.keys[l-i] = it.keys[l-i], it.keys[i]
+	}
+	return it
 }
 
-func newMemDBIterator() *memDBIterator {
-	return &memDBIterator{}
-}
-
-func (it *memDBIterator) Seek(key []byte) {
-	for i, ik := range it.keys {
-		it.cur = i
-		if bytes.Compare(key, []byte(ik)) <= 0 {
-			return
+func (db *MemDB) getSortedKeys(start, end []byte) []string {
+	keys := []string{}
+	for key, _ := range db.db {
+		if IsKeyInDomain([]byte(key), start, end) {
+			keys = append(keys, key)
 		}
 	}
-	it.cur += 1 // If not found, becomes invalid.
+ sort.Strings(keys) + return keys +} + +var _ Iterator = (*memDBIterator)(nil) + +type memDBIterator struct { + cur int + keys []string + db DB + start, end []byte +} + +func newMemDBIterator(db DB, start, end []byte) *memDBIterator { + return &memDBIterator{ + db: db, + start: start, + end: end, + } +} + +func (it *memDBIterator) Domain() ([]byte, []byte) { + return it.start, it.end } func (it *memDBIterator) Valid() bool { @@ -193,7 +222,3 @@ func (it *memDBIterator) Close() { it.db = nil it.keys = nil } - -func (it *memDBIterator) GetError() error { - return nil -} diff --git a/db/mem_db_test.go b/db/mem_db_test.go index b5c9167c..a08a3679 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMemDbIterator(t *testing.T) { +func TestMemDBIterator(t *testing.T) { db := NewMemDB() keys := make([][]byte, 100) for i := 0; i < 100; i++ { @@ -19,7 +19,7 @@ func TestMemDbIterator(t *testing.T) { db.Set(k, value) } - iter := db.Iterator() + iter := db.Iterator(BeginningKey(), EndingKey()) i := 0 for ; iter.Valid(); iter.Next() { assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") diff --git a/db/types.go b/db/types.go new file mode 100644 index 00000000..ee8d69cc --- /dev/null +++ b/db/types.go @@ -0,0 +1,124 @@ +package db + +type DB interface { + + // Get returns nil iff key doesn't exist. Panics on nil key. + Get([]byte) []byte + + // Has checks if a key exists. Panics on nil key. + Has(key []byte) bool + + // Set sets the key. Panics on nil key. + Set([]byte, []byte) + SetSync([]byte, []byte) + + // Delete deletes the key. Panics on nil key. + Delete([]byte) + DeleteSync([]byte) + + // Iterator over a domain of keys in ascending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. 
+ Iterator(start, end []byte) Iterator + + // Iterator over a domain of keys in descending order. End is exclusive. + // Start must be greater than end, or the Iterator is invalid. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + ReverseIterator(start, end []byte) Iterator + + // Releases the connection. + Close() + + // Creates a batch for atomic updates. + NewBatch() Batch + + // For debugging + Print() + + // Stats returns a map of property values for all keys and the size of the cache. + Stats() map[string]string +} + +//---------------------------------------- +// Batch + +type Batch interface { + SetDeleter + Write() +} + +type SetDeleter interface { + Set(key, value []byte) + Delete(key []byte) +} + +//---------------------------------------- + +// BeginningKey is the smallest key. +func BeginningKey() []byte { + return []byte{} +} + +// EndingKey is the largest key. +func EndingKey() []byte { + return nil +} + +/* + Usage: + + var itr Iterator = ... + defer itr.Close() + + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(); itr.Value() + // ... + } +*/ +type Iterator interface { + + // The start & end (exclusive) limits to iterate over. + // If end < start, then the Iterator goes in reverse order. + // + // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate + // over anything with the prefix []byte{12, 13}. + // + // The smallest key is the empty byte array []byte{} - see BeginningKey(). + // The largest key is the nil byte array []byte(nil) - see EndingKey(). + Domain() (start []byte, end []byte) + + // Valid returns whether the current position is valid. + // Once invalid, an Iterator is forever invalid. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by order of iteration. + // + // If Valid returns false, this method will panic. + Next() + + // Key returns the key of the cursor. + // + // If Valid returns false, this method will panic. 
+ Key() []byte + + // Value returns the value of the cursor. + // + // If Valid returns false, this method will panic. + Value() []byte + + // Release deallocates the given Iterator. + Close() +} + +// For testing convenience. +func bz(s string) []byte { + return []byte(s) +} + +// All DB funcs should panic on nil key. +func panicNilKey(key []byte) { + if key == nil { + panic("nil key") + } +} diff --git a/db/util.go b/db/util.go index 5f381a5b..661d0a16 100644 --- a/db/util.go +++ b/db/util.go @@ -1,82 +1,45 @@ package db -import "bytes" - -// A wrapper around itr that tries to keep the iterator -// within the bounds as defined by `prefix` -type prefixIterator struct { - itr Iterator - prefix []byte - invalid bool -} - -func (pi *prefixIterator) Seek(key []byte) { - if !bytes.HasPrefix(key, pi.prefix) { - pi.invalid = true - return - } - pi.itr.Seek(key) - pi.checkInvalid() -} - -func (pi *prefixIterator) checkInvalid() { - if !pi.itr.Valid() { - pi.invalid = true - } -} - -func (pi *prefixIterator) Valid() bool { - if pi.invalid { - return false - } - key := pi.itr.Key() - ok := bytes.HasPrefix(key, pi.prefix) - if !ok { - pi.invalid = true - return false - } - return true -} - -func (pi *prefixIterator) Next() { - if pi.invalid { - panic("prefixIterator Next() called when invalid") - } - pi.itr.Next() - pi.checkInvalid() -} - -func (pi *prefixIterator) Prev() { - if pi.invalid { - panic("prefixIterator Prev() called when invalid") - } - pi.itr.Prev() - pi.checkInvalid() -} - -func (pi *prefixIterator) Key() []byte { - if pi.invalid { - panic("prefixIterator Key() called when invalid") - } - return pi.itr.Key() -} - -func (pi *prefixIterator) Value() []byte { - if pi.invalid { - panic("prefixIterator Value() called when invalid") - } - return pi.itr.Value() -} - -func (pi *prefixIterator) Close() { pi.itr.Close() } -func (pi *prefixIterator) GetError() error { return pi.itr.GetError() } +import ( + "bytes" +) func IteratePrefix(db DB, prefix []byte) Iterator { - 
itr := db.Iterator() - pi := &prefixIterator{ - itr: itr, - prefix: prefix, + var start, end []byte + if len(prefix) == 0 { + start = BeginningKey() + end = EndingKey() + } else { + start = cp(prefix) + end = cpIncr(prefix) } - pi.Seek(prefix) - return pi + return db.Iterator(start, end) +} + +//---------------------------------------- + +func cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret +} + +// CONTRACT: len(bz) > 0 +func cpIncr(bz []byte) (ret []byte) { + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i] += 1 + return + } else { + ret[i] = byte(0x00) + } + } + return EndingKey() +} + +func IsKeyInDomain(key, start, end []byte) bool { + leftCondition := bytes.Equal(start, BeginningKey()) || bytes.Compare(key, start) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || bytes.Compare(key, end) < 0 + return leftCondition && rightCondition } diff --git a/db/util_test.go b/db/util_test.go index 55a41bf5..b273f8d4 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -5,6 +5,7 @@ import ( "testing" ) +// empty iterator for empty db func TestPrefixIteratorNoMatchNil(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -16,6 +17,7 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { } } +// empty iterator for db populated after iterator created func TestPrefixIteratorNoMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -28,12 +30,27 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { } } -func TestPrefixIteratorMatch2(t *testing.T) { +// empty iterator for prefix starting above db entry +func TestPrefixIteratorNoMatch2(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("3"), bz("value_3")) + itr := 
IteratePrefix(db, []byte("4")) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// iterator with single val for db with single val, starting from that val +func TestPrefixIteratorMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("2"), bz("value_2")) - itr := IteratePrefix(db, []byte("2")) + itr := IteratePrefix(db, bz("2")) checkValid(t, itr, true) checkItem(t, itr, bz("2"), bz("value_2")) @@ -45,28 +62,22 @@ func TestPrefixIteratorMatch2(t *testing.T) { } } -func TestPrefixIteratorMatch3(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("3"), bz("value_3")) - itr := IteratePrefix(db, []byte("2")) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -// Search for a/1, fail by too much Next() +// iterator with prefix iterates over everything with same prefix func TestPrefixIteratorMatches1N(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) + + // prefixed db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/1")) + + // not + db.SetSync(bz("b/3"), bz("value_3")) + db.SetSync(bz("a-3"), bz("value_3")) + db.SetSync(bz("a.3"), bz("value_3")) + db.SetSync(bz("abcdefg"), bz("value_3")) + itr := IteratePrefix(db, bz("a/")) checkValid(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) @@ -76,133 +87,7 @@ func TestPrefixIteratorMatches1N(t *testing.T) { // Bad! checkNext(t, itr, false) - // Once invalid... 
- checkInvalid(t, itr) - }) - } -} - -// Search for a/1, fail by too much Prev() -func TestPrefixIteratorMatches1P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/1")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! - checkPrev(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -// Search for a/2, fail by too much Next() -func TestPrefixIteratorMatches2N(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/2")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! - checkNext(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -// Search for a/2, fail by too much Prev() -func TestPrefixIteratorMatches2P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/2")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! 
- checkPrev(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -// Search for a/3, fail by too much Next() -func TestPrefixIteratorMatches3N(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/3")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! - checkNext(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -// Search for a/3, fail by too much Prev() -func TestPrefixIteratorMatches3P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/3")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! - checkPrev(t, itr, false) - - // Once invalid... + //Once invalid... checkInvalid(t, itr) }) }