Modified all db types to conform to the interface changes.
This commit is contained in:
parent
d6205eb4ca
commit
6064c80250
|
@ -106,6 +106,23 @@ func (db *CLevelDB) Print() {
|
|||
}
|
||||
}
|
||||
|
||||
func (db *CLevelDB) Stats() map[string]string {
|
||||
keys := []string{}
|
||||
|
||||
stats := make(map[string]string)
|
||||
for _, key := range keys {
|
||||
str, err := db.db.GetProperty(key)
|
||||
if err == nil {
|
||||
stats[key] = str
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (db *CLevelDB) Iterator() Iterator {
|
||||
return db.db.NewIterator(nil, nil)
|
||||
}
|
||||
|
||||
func (db *CLevelDB) NewBatch() Batch {
|
||||
batch := levigo.NewWriteBatch()
|
||||
return &cLevelDBBatch{db, batch}
|
||||
|
|
5
db.go
5
db.go
|
@ -12,9 +12,9 @@ type DB interface {
|
|||
NewBatch() Batch
|
||||
|
||||
// For debugging
|
||||
Iterator() Iterator
|
||||
Next(Iterator) (key []byte, value []byte)
|
||||
Print()
|
||||
Iterator() Iterator
|
||||
Stats() map[string]string
|
||||
}
|
||||
|
||||
type Batch interface {
|
||||
|
@ -25,6 +25,7 @@ type Batch interface {
|
|||
|
||||
// Iterator walks a database's key/value pairs. Call Next to advance;
// Key and Value report the entry at the current position.
type Iterator interface {
	// Next advances the iterator, reporting whether an entry is available.
	Next() bool

	// Key returns the key at the current position.
	Key() []byte
	// Value returns the value at the current position.
	Value() []byte
}
|
||||
|
|
|
@ -82,6 +82,9 @@ func (db *GoLevelDB) Close() {
|
|||
}
|
||||
|
||||
func (db *GoLevelDB) Print() {
|
||||
str, _ := db.db.GetProperty("leveldb.stats")
|
||||
fmt.Printf("%v\n", str)
|
||||
|
||||
iter := db.db.NewIterator(nil, nil)
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
|
@ -90,15 +93,30 @@ func (db *GoLevelDB) Print() {
|
|||
}
|
||||
}
|
||||
|
||||
func (db *GoLevelDB) Iterator() Iterator {
|
||||
return db.db.NewIterator(nil, nil)
|
||||
func (db *GoLevelDB) Stats() map[string]string {
|
||||
keys := []string{
|
||||
"leveldb.num-files-at-level{n}",
|
||||
"leveldb.stats",
|
||||
"leveldb.sstables",
|
||||
"leveldb.blockpool",
|
||||
"leveldb.cachedblock",
|
||||
"leveldb.openedtables",
|
||||
"leveldb.alivesnaps",
|
||||
"leveldb.alibeiters",
|
||||
}
|
||||
|
||||
stats := make(map[string]string)
|
||||
for _, key := range keys {
|
||||
str, err := db.db.GetProperty(key)
|
||||
if err == nil {
|
||||
stats[key] = str
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (db *GoLevelDB) Next(iter Iterator) ([]byte, []byte) {
|
||||
if iter.Next() {
|
||||
return iter.Key(), iter.Value()
|
||||
}
|
||||
return nil, nil
|
||||
func (db *GoLevelDB) Iterator() Iterator {
|
||||
return db.db.NewIterator(nil, nil)
|
||||
}
|
||||
|
||||
func (db *GoLevelDB) NewBatch() Batch {
|
||||
|
|
49
mem_db.go
49
mem_db.go
|
@ -65,14 +65,51 @@ func (db *MemDB) Print() {
|
|||
}
|
||||
}
|
||||
|
||||
// TODO: needs to be wired to range db.db
|
||||
func (db *MemDB) Iterator() Iterator {
|
||||
return nil
|
||||
func (db *MemDB) Stats() map[string]string {
|
||||
stats := make(map[string]string)
|
||||
stats["database.type"] = "memDB"
|
||||
return stats
|
||||
}
|
||||
|
||||
// TODO: needs to be wired to range db.db
|
||||
func (db *MemDB) Next(iter Iterator) (key []byte, value []byte) {
|
||||
return nil, nil
|
||||
type memDBIterator struct {
|
||||
last int
|
||||
keys []string
|
||||
db *MemDB
|
||||
}
|
||||
|
||||
func (it *memDBIterator) Create(db *MemDB) *memDBIterator {
|
||||
if it == nil {
|
||||
it = &memDBIterator{}
|
||||
}
|
||||
it.db = db
|
||||
it.last = -1
|
||||
|
||||
// unfortunately we need a copy of all of the keys
|
||||
for key, _ := range db.db {
|
||||
it.keys = append(it.keys, key)
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (it *memDBIterator) Next() bool {
|
||||
if it.last >= len(it.keys) {
|
||||
return false
|
||||
}
|
||||
it.last++
|
||||
return true
|
||||
}
|
||||
|
||||
func (it *memDBIterator) Key() []byte {
|
||||
return []byte(it.keys[it.last])
|
||||
}
|
||||
|
||||
func (it *memDBIterator) Value() []byte {
|
||||
return it.db.db[it.keys[it.last]]
|
||||
}
|
||||
|
||||
func (db *MemDB) Iterator() Iterator {
|
||||
var it *memDBIterator
|
||||
return it.Create(db)
|
||||
}
|
||||
|
||||
func (db *MemDB) NewBatch() Batch {
|
||||
|
|
Loading…
Reference in New Issue