only downsize underlying arrays if they're much too large

Dan Laine 2020-06-23 16:44:02 -04:00
parent 19a3f69e99
commit 6c6136d551
9 changed files with 76 additions and 24 deletions

database/common.go Normal file

@@ -0,0 +1,14 @@
+package database
+
+const (
+	// MaxExcessCapacityFactor ...
+	// If, when a batch is reset, cap(batch) > len(batch)*MaxExcessCapacityFactor,
+	// the underlying array's capacity will be reduced by a factor of CapacityReductionFactor.
+	// A higher value for MaxExcessCapacityFactor --> less aggressive array downsizing --> fewer memory allocations
+	// but more unnecessary data in the underlying array that can't be garbage collected.
+	// A higher value for CapacityReductionFactor --> more aggressive array downsizing --> more memory allocations
+	// but less unnecessary data in the underlying array that can't be garbage collected.
+	MaxExcessCapacityFactor = 4
+	// CapacityReductionFactor ...
+	CapacityReductionFactor = 2
+)
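
To make the tradeoff concrete, here is a standalone sketch of when these constants trigger a downsize (not part of the commit; the numbers are illustrative):

package main

import "fmt"

const (
	maxExcessCapacityFactor = 4
	capacityReductionFactor = 2
)

func main() {
	// A batch whose backing array grew to 1024 slots during a burst of
	// writes, then is reset while holding only 100 entries:
	length, capacity := 100, 1024

	if capacity > length*maxExcessCapacityFactor { // 1024 > 400
		// The array is much too large: reallocate at half the old capacity.
		fmt.Println("downsize to cap", capacity/capacityReductionFactor) // 512
	} else {
		// Capacity is close enough to the contents: keep the array.
		fmt.Println("keep cap", capacity)
	}
}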


@@ -17,10 +17,6 @@ import (
 	"github.com/ava-labs/gecko/utils/hashing"
 )
 
-const (
-	minBatchSize = 32
-)
-
 // Database encrypts all values that are provided
 type Database struct {
 	lock sync.RWMutex
@@ -205,7 +201,11 @@ func (b *batch) Write() error {
 
 // Reset resets the batch for reuse.
 func (b *batch) Reset() {
-	b.writes = make([]keyValue, 0, minBatchSize)
+	if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
+		b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
+	} else {
+		b.writes = b.writes[:0]
+	}
 	b.Batch.Reset()
 }
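
The else branch (b.writes = b.writes[:0]) is the common case and is what makes batch reuse cheap: truncating to length zero keeps the backing array, so refilling the batch appends into memory that is already allocated. A standalone demonstration (not part of the commit):

package main

import "fmt"

func main() {
	writes := make([]int, 0, 64)
	writes = append(writes, 1, 2, 3)

	// Truncate for reuse: length drops to 0 but capacity is retained,
	// so the next 64 appends will not allocate.
	writes = writes[:0]
	fmt.Println(len(writes), cap(writes)) // 0 64
}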


@@ -15,8 +15,7 @@ import (
 
 const (
	// DefaultSize is the default initial size of the memory database
-	DefaultSize  = 1 << 10
-	minBatchSize = 32
+	DefaultSize = 1 << 10
 )
 
 // Database is an ephemeral key-value store that implements the Database
@@ -194,7 +193,11 @@ func (b *batch) Write() error {
 
 // Reset implements the Batch interface
 func (b *batch) Reset() {
-	b.writes = make([]keyValue, 0, minBatchSize)
+	if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
+		b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
+	} else {
+		b.writes = b.writes[:0]
+	}
 	b.size = 0
 }


@@ -12,10 +12,6 @@ import (
 	"github.com/ava-labs/gecko/utils/hashing"
 )
 
-const (
-	minBatchSize = 32
-)
-
 // Database partitions a database into a sub-database by prefixing all keys with
 // a unique value.
 type Database struct {
@@ -203,7 +199,11 @@ func (b *batch) Write() error {
 
 // Reset resets the batch for reuse.
 func (b *batch) Reset() {
-	b.writes = make([]keyValue, 0, minBatchSize)
+	if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
+		b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
+	} else {
+		b.writes = b.writes[:0]
+	}
 	b.Batch.Reset()
 }


@@ -14,10 +14,6 @@ import (
 	"github.com/ava-labs/gecko/utils"
 )
 
-const (
-	minBatchSize = 32
-)
-
 var (
	errClosed   = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrClosed)
	errNotFound = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrNotFound)
@@ -184,7 +180,11 @@ func (b *batch) Write() error {
 }
 
 func (b *batch) Reset() {
-	b.writes = make([]keyValue, 0, minBatchSize)
+	if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
+		b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
+	} else {
+		b.writes = b.writes[:0]
+	}
 	b.size = 0
 }


@@ -305,7 +305,11 @@ func (b *batch) Write() error {
 
 // Reset implements the Database interface
 func (b *batch) Reset() {
-	b.writes = make([]keyValue, 0)
+	if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
+		b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
+	} else {
+		b.writes = b.writes[:0]
+	}
 	b.size = 0
 }


@@ -58,10 +58,10 @@ func (ids *ShortSet) Remove(idList ...ShortID) {
 func (ids *ShortSet) Clear() { *ids = nil }
 
 // CappedList returns a list of length at most [size].
-// Size should be >= 0. If size < 0, returns empty list.
+// Size should be >= 0. If size < 0, returns nil.
 func (ids ShortSet) CappedList(size int) []ShortID {
	if size < 0 {
-		return make([]ShortID, 0, 0)
+		return nil
	}
	if l := ids.Len(); l < size {
		size = l
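
Returning nil works because a nil slice is interchangeable with an empty one for len, range, and append; it only differs under a direct nil comparison. A standalone demonstration (not part of the commit):

package main

import "fmt"

func main() {
	var nilSlice []int           // what CappedList now returns for size < 0
	emptySlice := make([]int, 0) // what it returned before

	fmt.Println(len(nilSlice), len(emptySlice)) // 0 0
	for range nilSlice {
		// never executes; ranging over a nil slice is safe
	}
	fmt.Println(nilSlice == nil, emptySlice == nil) // true false
}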


@@ -13,6 +13,19 @@ import (
 	"github.com/ava-labs/gecko/utils/random"
 )
 
+const (
+	// maxExcessCapacityFactor ...
+	// If, when the validator set is reset, cap(set) > len(set)*maxExcessCapacityFactor,
+	// the underlying arrays' capacities will be reduced by a factor of capacityReductionFactor.
+	// A higher value for maxExcessCapacityFactor --> less aggressive array downsizing --> fewer memory allocations
+	// but more unnecessary data in the underlying array that can't be garbage collected.
+	// A higher value for capacityReductionFactor --> more aggressive array downsizing --> more memory allocations
+	// but less unnecessary data in the underlying array that can't be garbage collected.
+	maxExcessCapacityFactor = 4
+	// capacityReductionFactor ...
+	capacityReductionFactor = 2
+)
+
 // Set of validators that can be sampled
 type Set interface {
	fmt.Stringer
@@ -72,9 +85,27 @@ func (s *set) Set(vdrs []Validator) {
 func (s *set) set(vdrs []Validator) {
	lenVdrs := len(vdrs)
 
+	// If the underlying arrays are much larger than necessary, resize them to
+	// allow garbage collection of unused memory
+	if cap(s.vdrSlice) > len(s.vdrSlice)*maxExcessCapacityFactor {
+		newCap := cap(s.vdrSlice) / capacityReductionFactor
+		if newCap < lenVdrs {
+			newCap = lenVdrs
+		}
+		s.vdrSlice = make([]Validator, 0, newCap)
+	} else {
+		s.vdrSlice = s.vdrSlice[:0]
+	}
+	if cap(s.sampler.Weights) > len(s.sampler.Weights)*maxExcessCapacityFactor {
+		newCap := cap(s.sampler.Weights) / capacityReductionFactor
+		if newCap < lenVdrs {
+			newCap = lenVdrs
+		}
+		s.sampler.Weights = make([]uint64, 0, newCap)
+	} else {
+		s.sampler.Weights = s.sampler.Weights[:0]
+	}
	s.vdrMap = make(map[[20]byte]int, lenVdrs)
-	s.vdrSlice = make([]Validator, 0, lenVdrs)
-	s.sampler.Weights = make([]uint64, 0, lenVdrs)
 
	for _, vdr := range vdrs {
		s.add(vdr)
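
The two branches above apply the same rule to []Validator and []uint64; the commit predates Go generics, so the logic is repeated inline. With a modern toolchain it could be factored into a helper along these lines (a hypothetical sketch, not part of the commit; resetSlice is an invented name that assumes the package constants above):

// resetSlice truncates s to length zero, reallocating at a reduced capacity
// (but no smaller than minCap) when the backing array is much too large.
func resetSlice[T any](s []T, minCap int) []T {
	if cap(s) > len(s)*maxExcessCapacityFactor {
		newCap := cap(s) / capacityReductionFactor
		if newCap < minCap {
			newCap = minCap
		}
		return make([]T, 0, newCap)
	}
	return s[:0]
}

Each block would then collapse to a single call, e.g. s.vdrSlice = resetSlice(s.vdrSlice, lenVdrs).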


@@ -808,7 +808,7 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator {
		validator.Wght = weight
	}
 
-	vdrList := make([]validators.Validator, len(vdrMap), len(vdrMap))
+	vdrList := make([]validators.Validator, len(vdrMap))
	i := 0
	for _, validator := range vdrMap {
		vdrList[i] = validator
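
The dropped third argument was redundant: make([]T, n) already sets the capacity equal to the length. A standalone check (not part of the commit):

package main

import "fmt"

func main() {
	s := make([]int, 5)         // no explicit capacity
	fmt.Println(len(s), cap(s)) // 5 5
}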