swarm/storage: remove redundant increments for dataIdx and entryCnt (#17484)

* swarm/storage: remove redundant increments for dataIdx and entryCnt

* swarm/storage: add Delete to LDBStore

* swarm/storage: wait for garbage collection
Anton Evangelatov authored on 2018-09-12 14:39:45 +02:00, committed by Balint Gabor
parent bfce00385f
commit ff3a5d24d2
2 changed files with 35 additions and 31 deletions
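Usage note (not part of the commit): a minimal sketch, assuming code inside the swarm/storage package with an already constructed *LDBStore, of how the new exported Delete replaces the manual index lookup the tests previously performed. removeChunk is a hypothetical helper name; the store construction is elided.

    // removeChunk is a hypothetical helper assumed to live in the
    // swarm/storage package next to LDBStore.
    func removeChunk(ldb *LDBStore, addr Address) {
    	// Before this commit, callers (see the old test code in the diff
    	// below) had to reach into the store internals to delete a chunk:
    	//
    	//   ikey := getIndexKey(addr)
    	//   var indx dpaDBIndex
    	//   ldb.tryAccessIdx(ikey, &indx)
    	//   ldb.delete(indx.Idx, ikey, ldb.po(addr))
    	//
    	// The exported Delete added here wraps that sequence and takes the
    	// store lock itself:
    	ldb.Delete(addr)
    }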

swarm/storage/ldbstore.go

@@ -147,13 +147,10 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
 	}
 	data, _ := s.db.Get(keyEntryCnt)
 	s.entryCnt = BytesToU64(data)
-	s.entryCnt++
 	data, _ = s.db.Get(keyAccessCnt)
 	s.accessCnt = BytesToU64(data)
-	s.accessCnt++
 	data, _ = s.db.Get(keyDataIdx)
 	s.dataIdx = BytesToU64(data)
-	s.dataIdx++

 	return s, nil
 }
@@ -251,6 +248,8 @@ func decodeOldData(data []byte, chunk *Chunk) {
 }

 func (s *LDBStore) collectGarbage(ratio float32) {
+	log.Trace("collectGarbage", "ratio", ratio)
 	metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)

 	it := s.db.NewIterator()
@@ -492,6 +491,18 @@ func (s *LDBStore) ReIndex() {
 	log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
 }

+func (s *LDBStore) Delete(addr Address) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ikey := getIndexKey(addr)
+
+	var indx dpaDBIndex
+	s.tryAccessIdx(ikey, &indx)
+	s.delete(indx.Idx, ikey, s.po(addr))
+}
+
 func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
 	metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)
@@ -517,8 +528,8 @@ func (s *LDBStore) CurrentBucketStorageIndex(po uint8) uint64 {
 }

 func (s *LDBStore) Size() uint64 {
-	s.lock.Lock()
-	defer s.lock.Unlock()
+	s.lock.RLock()
+	defer s.lock.RUnlock()
 	return s.entryCnt
 }
@@ -602,21 +613,23 @@ mainLoop:
 		}
 		close(c)

 		for e > s.capacity {
+			log.Trace("for >", "e", e, "s.capacity", s.capacity)
 			// Collect garbage in a separate goroutine
 			// to be able to interrupt this loop by s.quit.
 			done := make(chan struct{})
 			go func() {
 				s.collectGarbage(gcArrayFreeRatio)
+				log.Trace("collectGarbage closing done")
 				close(done)
 			}()

-			e = s.entryCnt
 			select {
 			case <-s.quit:
 				s.lock.Unlock()
 				break mainLoop
 			case <-done:
 			}
+			e = s.entryCnt
 		}
 		s.lock.Unlock()
 	}

swarm/storage/ldbstore_test.go

@@ -27,8 +27,8 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

 	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
@@ -209,7 +209,7 @@ func testIterator(t *testing.T, mock bool) {
 	for poc = 0; poc <= 255; poc++ {
 		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
 			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
-			chunkkeys_results[n-1] = k
+			chunkkeys_results[n] = k
 			i++
 			return true
 		})
@@ -324,12 +324,12 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
 		log.Info("got back chunk", "chunk", ret)
 	}

-	if ldb.entryCnt != uint64(n+1) {
-		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt)
+	if ldb.entryCnt != uint64(n) {
+		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
 	}

-	if ldb.accessCnt != uint64(2*n+1) {
-		t.Fatalf("expected accessCnt to be equal to %v, but got %v", n+1, ldb.accessCnt)
+	if ldb.accessCnt != uint64(2*n) {
+		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
 	}
 }
@@ -362,7 +362,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) {
 	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

 	// wait for garbage collection to kick in on the responsible actor
-	time.Sleep(5 * time.Second)
+	time.Sleep(1 * time.Second)

 	var missing int
 	for i := 0; i < n; i++ {
@@ -416,14 +416,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
 	for i := 0; i < n; i++ {
 		// delete all even index chunks
 		if i%2 == 0 {
-			key := chunks[i].Addr
-			ikey := getIndexKey(key)
-			var indx dpaDBIndex
-			ldb.tryAccessIdx(ikey, &indx)
-			ldb.delete(indx.Idx, ikey, ldb.po(key))
+			ldb.Delete(chunks[i].Addr)
 		}
 	}
@@ -452,12 +445,12 @@ func TestLDBStoreAddRemove(t *testing.T) {

 // TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
 func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
-	capacity := 10
+	capacity := 11
 	ldb, cleanup := newLDBStore(t)
 	ldb.setCapacity(uint64(capacity))

-	n := 7
+	n := 11
 	chunks := []*Chunk{}
 	for i := 0; i < capacity; i++ {
@@ -477,13 +470,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 	// delete all chunks
 	for i := 0; i < n; i++ {
-		key := chunks[i].Addr
-		ikey := getIndexKey(key)
-		var indx dpaDBIndex
-		ldb.tryAccessIdx(ikey, &indx)
-		ldb.delete(indx.Idx, ikey, ldb.po(key))
+		ldb.Delete(chunks[i].Addr)
 	}

 	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
@@ -491,9 +478,10 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 	cleanup()

 	ldb, cleanup = newLDBStore(t)
+	capacity = 10
 	ldb.setCapacity(uint64(capacity))
-	n = 10
+	n = 11

 	for i := 0; i < n; i++ {
 		ldb.Put(context.TODO(), chunks[i])
@@ -504,6 +492,9 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 		<-chunks[i].dbStoredC
 	}

+	// wait for garbage collection
+	time.Sleep(1 * time.Second)
+
 	// expect for first chunk to be missing, because it has the smallest access value
 	idx := 0
 	ret, err := ldb.Get(context.TODO(), chunks[idx].Addr)