commit
e09e3a56f3
21
CHANGELOG.md
21
CHANGELOG.md
|
@ -1,5 +1,26 @@
|
|||
# Changelog
|
||||
|
||||
## 0.9.0
|
||||
|
||||
*June 24th, 2018*
|
||||
|
||||
BREAKING:
|
||||
- [events, pubsub] Removed - moved to github.com/tendermint/tendermint
|
||||
- [merkle] Use 20-bytes of SHA256 instead of RIPEMD160. NOTE: this package is
|
||||
moving to github.com/tendermint/go-crypto !
|
||||
- [common] Remove gogoproto from KVPair types
|
||||
- [common] Error simplification, #220
|
||||
|
||||
FEATURES:
|
||||
|
||||
- [db/remotedb] New DB type using an external CLevelDB process via
|
||||
GRPC
|
||||
- [autofile] logjack command for piping stdin to a rotating file
|
||||
- [bech32] New package. NOTE: should move out of here - it's just two small
|
||||
functions
|
||||
- [common] ColoredBytes([]byte) string for printing mixed ascii and bytes
|
||||
- [db] DebugDB uses ColoredBytes()
|
||||
|
||||
## 0.8.4
|
||||
|
||||
*June 5, 2018*
|
||||
|
|
|
@ -1,6 +1,12 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/btcsuite/btcutil"
|
||||
packages = ["bech32"]
|
||||
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
|
@ -42,14 +48,16 @@
|
|||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"gogoproto",
|
||||
"proto",
|
||||
"protoc-gen-gogo/descriptor"
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
]
|
||||
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
|
||||
version = "v1.0.0"
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
|
@ -185,9 +193,18 @@
|
|||
revision = "b89cc31ef7977104127d34c1bd31ebd1a9db2199"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ripemd160"]
|
||||
revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686"
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"trace"
|
||||
]
|
||||
revision = "d11bb6cd8e3c4e60239c9cb20ef68586d74500d0"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/sys"
|
||||
|
@ -197,15 +214,59 @@
|
|||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm"
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "c01e4764d870b77f8abe5096ee19ad20d80e8075"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "86e600f69ee4704c6efbf6a2a40a5c10700e76c2"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"balancer",
|
||||
"balancer/base",
|
||||
"balancer/roundrobin",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"encoding",
|
||||
"encoding/proto",
|
||||
"grpclb/grpc_lb_v1/messages",
|
||||
"grpclog",
|
||||
"internal",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"resolver",
|
||||
"resolver/dns",
|
||||
"resolver/passthrough",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
"transport"
|
||||
]
|
||||
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
|
||||
version = "v1.11.3"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
|
@ -215,6 +276,6 @@
|
|||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "c33ff784e40965e1cd0ec6232b43e379c6608cb41a9c5c707247742b68c906fb"
|
||||
inputs-digest = "e0c0af880b57928787ea78a820abefd2759e6aee4cba18e67ab36b80e62ad581"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
|
@ -61,6 +61,9 @@
|
|||
name = "github.com/stretchr/testify"
|
||||
version = "1.2.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/btcsuite/btcutil"
|
||||
branch ="master"
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
|
|
34
Makefile
34
Makefile
|
@ -1,13 +1,13 @@
|
|||
GOTOOLS = \
|
||||
github.com/golang/dep/cmd/dep \
|
||||
github.com/gogo/protobuf/protoc-gen-gogo \
|
||||
github.com/gogo/protobuf/gogoproto
|
||||
github.com/golang/protobuf/protoc-gen-go \
|
||||
github.com/square/certstrap
|
||||
# github.com/alecthomas/gometalinter.v2 \
|
||||
|
||||
GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-gogo
|
||||
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
|
||||
GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-go
|
||||
INCLUDE = -I=. -I=${GOPATH}/src
|
||||
|
||||
all: check get_vendor_deps protoc build test install metalinter
|
||||
all: check get_vendor_deps protoc grpc_dbserver build test install metalinter
|
||||
|
||||
check: check_tools
|
||||
|
||||
|
@ -18,7 +18,7 @@ protoc:
|
|||
## If you get the following error,
|
||||
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
|
||||
## See https://stackoverflow.com/a/25518702
|
||||
protoc $(INCLUDE) --gogo_out=plugins=grpc:. common/*.proto
|
||||
protoc $(INCLUDE) --go_out=plugins=grpc:. common/*.proto
|
||||
@echo "--> adding nolint declarations to protobuf generated files"
|
||||
@awk '/package common/ { print "//nolint: gas"; print; next }1' common/types.pb.go > common/types.pb.go.new
|
||||
@mv common/types.pb.go.new common/types.pb.go
|
||||
|
@ -66,8 +66,21 @@ get_vendor_deps:
|
|||
########################################
|
||||
### Testing
|
||||
|
||||
test:
|
||||
go test -tags gcc $(shell go list ./... | grep -v vendor)
|
||||
gen_certs: clean_certs
|
||||
## Generating certificates for TLS testing...
|
||||
certstrap init --common-name "tendermint.com" --passphrase ""
|
||||
certstrap request-cert -ip "::" --passphrase ""
|
||||
certstrap sign "::" --CA "tendermint.com" --passphrase ""
|
||||
mv out/::.crt out/::.key db/remotedb
|
||||
|
||||
clean_certs:
|
||||
## Cleaning TLS testing certificates...
|
||||
rm -rf out
|
||||
rm -f db/remotedb/::.crt db/remotedb/::.key
|
||||
|
||||
test: gen_certs
|
||||
GOCACHE=off go test -tags gcc $(shell go list ./... | grep -v vendor)
|
||||
make clean_certs
|
||||
|
||||
test100:
|
||||
@for i in {1..100}; do make test; done
|
||||
|
@ -118,4 +131,7 @@ metalinter_all:
|
|||
# To avoid unintended conflicts with file names, always add to .PHONY
|
||||
# unless there is a reason not to.
|
||||
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
|
||||
.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all
|
||||
.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all gen_certs clean_certs
|
||||
|
||||
grpc_dbserver:
|
||||
protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
|
||||
|
|
|
@ -0,0 +1,108 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
auto "github.com/tendermint/tmlibs/autofile"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
const Version = "0.0.1"
|
||||
const sleepSeconds = 1 // Every second
|
||||
const readBufferSize = 1024 // 1KB at a time
|
||||
|
||||
// Parse command-line options
|
||||
func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) {
|
||||
var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
|
||||
var chopSizeStr, limitSizeStr string
|
||||
flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.")
|
||||
flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this")
|
||||
flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.")
|
||||
flagSet.BoolVar(&version, "version", false, "Version")
|
||||
flagSet.Parse(os.Args[1:])
|
||||
chopSize = parseBytesize(chopSizeStr)
|
||||
limitSize = parseBytesize(limitSizeStr)
|
||||
return
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
// Read options
|
||||
headPath, chopSize, limitSize, version := parseFlags()
|
||||
if version {
|
||||
fmt.Printf("logjack version %v\n", Version)
|
||||
return
|
||||
}
|
||||
|
||||
// Open Group
|
||||
group, err := auto.OpenGroup(headPath)
|
||||
if err != nil {
|
||||
fmt.Printf("logjack couldn't create output file %v\n", headPath)
|
||||
os.Exit(1)
|
||||
}
|
||||
group.SetHeadSizeLimit(chopSize)
|
||||
group.SetTotalSizeLimit(limitSize)
|
||||
err = group.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("logjack couldn't start with file %v\n", headPath)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
go func() {
|
||||
// Forever, read from stdin and write to AutoFile.
|
||||
buf := make([]byte, readBufferSize)
|
||||
for {
|
||||
n, err := os.Stdin.Read(buf)
|
||||
group.Write(buf[:n])
|
||||
group.Flush()
|
||||
if err != nil {
|
||||
group.Stop()
|
||||
if err == io.EOF {
|
||||
os.Exit(0)
|
||||
} else {
|
||||
fmt.Println("logjack errored")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Trap signal
|
||||
cmn.TrapSignal(func() {
|
||||
fmt.Println("logjack shutting down")
|
||||
})
|
||||
}
|
||||
|
||||
func parseBytesize(chopSize string) int64 {
|
||||
// Handle suffix multiplier
|
||||
var multiplier int64 = 1
|
||||
if strings.HasSuffix(chopSize, "T") {
|
||||
multiplier = 1042 * 1024 * 1024 * 1024
|
||||
chopSize = chopSize[:len(chopSize)-1]
|
||||
}
|
||||
if strings.HasSuffix(chopSize, "G") {
|
||||
multiplier = 1042 * 1024 * 1024
|
||||
chopSize = chopSize[:len(chopSize)-1]
|
||||
}
|
||||
if strings.HasSuffix(chopSize, "M") {
|
||||
multiplier = 1042 * 1024
|
||||
chopSize = chopSize[:len(chopSize)-1]
|
||||
}
|
||||
if strings.HasSuffix(chopSize, "K") {
|
||||
multiplier = 1042
|
||||
chopSize = chopSize[:len(chopSize)-1]
|
||||
}
|
||||
|
||||
// Parse the numeric part
|
||||
chopSizeInt, err := strconv.Atoi(chopSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return int64(chopSizeInt) * multiplier
|
||||
}
|
|
@ -32,6 +32,7 @@ func createTestGroup(t *testing.T, headSizeLimit int64) *Group {
|
|||
}
|
||||
|
||||
func destroyTestGroup(t *testing.T, g *Group) {
|
||||
g.Close()
|
||||
err := os.RemoveAll(g.Dir)
|
||||
require.NoError(t, err, "Error removing test Group directory")
|
||||
}
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
package bech32
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcutil/bech32"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
//ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32
|
||||
func ConvertAndEncode(hrp string, data []byte) (string, error) {
|
||||
converted, err := bech32.ConvertBits(data, 8, 5, true)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "encoding bech32 failed")
|
||||
}
|
||||
return bech32.Encode(hrp, converted)
|
||||
|
||||
}
|
||||
|
||||
//DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes
|
||||
func DecodeAndConvert(bech string) (string, []byte, error) {
|
||||
hrp, data, err := bech32.Decode(bech)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "decoding bech32 failed")
|
||||
}
|
||||
converted, err := bech32.ConvertBits(data, 5, 8, false)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "decoding bech32 failed")
|
||||
}
|
||||
return hrp, converted, nil
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
package bech32_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tmlibs/bech32"
|
||||
)
|
||||
|
||||
func TestEncodeAndDecode(t *testing.T) {
|
||||
|
||||
sum := sha256.Sum256([]byte("hello world\n"))
|
||||
|
||||
bech, err := bech32.ConvertAndEncode("shasum", sum[:])
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
hrp, data, err := bech32.DecodeAndConvert(bech)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if hrp != "shasum" {
|
||||
t.Error("Invalid hrp")
|
||||
}
|
||||
if bytes.Compare(data, sum[:]) != 0 {
|
||||
t.Error("Invalid decode")
|
||||
}
|
||||
}
|
|
@ -135,7 +135,7 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val inte
|
|||
if err != nil {
|
||||
assert.Equal(t, err, taskResult.Error, taskName)
|
||||
} else if pnk != nil {
|
||||
assert.Equal(t, pnk, taskResult.Error.(Error).Cause(), taskName)
|
||||
assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName)
|
||||
} else {
|
||||
assert.Nil(t, taskResult.Error, taskName)
|
||||
}
|
||||
|
|
|
@ -81,3 +81,15 @@ func Cyan(args ...interface{}) string {
|
|||
func White(args ...interface{}) string {
|
||||
return treatAll(ANSIFgWhite, args...)
|
||||
}
|
||||
|
||||
func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string {
|
||||
s := ""
|
||||
for _, b := range data {
|
||||
if 0x21 <= b && b < 0x7F {
|
||||
s += textColor(string(b))
|
||||
} else {
|
||||
s += bytesColor(Fmt("%02X", b))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
|
173
common/errors.go
173
common/errors.go
|
@ -6,106 +6,81 @@ import (
|
|||
)
|
||||
|
||||
//----------------------------------------
|
||||
// Convenience methods
|
||||
// Convenience method.
|
||||
|
||||
// ErrorWrap will just call .TraceFrom(), or create a new *cmnError.
|
||||
func ErrorWrap(cause interface{}, format string, args ...interface{}) Error {
|
||||
msg := Fmt(format, args...)
|
||||
if causeCmnError, ok := cause.(*cmnError); ok {
|
||||
return causeCmnError.TraceFrom(1, msg)
|
||||
msg := Fmt(format, args...)
|
||||
return causeCmnError.Stacktrace().Trace(1, msg)
|
||||
} else if cause == nil {
|
||||
return newCmnError(FmtError{format, args}).Stacktrace()
|
||||
} else {
|
||||
// NOTE: causeCmnError is a typed nil here.
|
||||
msg := Fmt(format, args...)
|
||||
return newCmnError(cause).Stacktrace().Trace(1, msg)
|
||||
}
|
||||
// NOTE: cause may be nil.
|
||||
// NOTE: do not use causeCmnError here, not the same as nil.
|
||||
return newError(msg, cause, cause).Stacktrace()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Error & cmnError
|
||||
|
||||
/*
|
||||
Usage:
|
||||
|
||||
Usage with arbitrary error data:
|
||||
|
||||
```go
|
||||
// Error construction
|
||||
var someT = errors.New("Some err type")
|
||||
var err1 error = NewErrorWithT(someT, "my message")
|
||||
type MyError struct{}
|
||||
var err1 error = NewErrorWithData(MyError{}, "my message")
|
||||
...
|
||||
// Wrapping
|
||||
var err2 error = ErrorWrap(err1, "another message")
|
||||
if (err1 != err2) { panic("should be the same")
|
||||
...
|
||||
// Error handling
|
||||
switch err2.T() {
|
||||
case someT: ...
|
||||
switch err2.Data().(type){
|
||||
case MyError: ...
|
||||
default: ...
|
||||
}
|
||||
```
|
||||
|
||||
*/
|
||||
type Error interface {
|
||||
Error() string
|
||||
Message() string
|
||||
Stacktrace() Error
|
||||
Trace(format string, args ...interface{}) Error
|
||||
TraceFrom(offset int, format string, args ...interface{}) Error
|
||||
Cause() interface{}
|
||||
WithT(t interface{}) Error
|
||||
T() interface{}
|
||||
Format(s fmt.State, verb rune)
|
||||
Trace(offset int, format string, args ...interface{}) Error
|
||||
Data() interface{}
|
||||
}
|
||||
|
||||
// New Error with no cause where the type is the format string of the message..
|
||||
// New Error with formatted message.
|
||||
// The Error's Data will be a FmtError type.
|
||||
func NewError(format string, args ...interface{}) Error {
|
||||
msg := Fmt(format, args...)
|
||||
return newError(msg, nil, format)
|
||||
|
||||
err := FmtError{format, args}
|
||||
return newCmnError(err)
|
||||
}
|
||||
|
||||
// New Error with specified type and message.
|
||||
func NewErrorWithT(t interface{}, format string, args ...interface{}) Error {
|
||||
msg := Fmt(format, args...)
|
||||
return newError(msg, nil, t)
|
||||
}
|
||||
|
||||
// NOTE: The name of a function "NewErrorWithCause()" implies that you are
|
||||
// creating a new Error, yet, if the cause is an Error, creating a new Error to
|
||||
// hold a ref to the old Error is probably *not* what you want to do.
|
||||
// So, use ErrorWrap(cause, format, a...) instead, which returns the same error
|
||||
// if cause is an Error.
|
||||
// IF you must set an Error as the cause of an Error,
|
||||
// then you can use the WithCauser interface to do so manually.
|
||||
// e.g. (error).(tmlibs.WithCauser).WithCause(causeError)
|
||||
|
||||
type WithCauser interface {
|
||||
WithCause(cause interface{}) Error
|
||||
// New Error with specified data.
|
||||
func NewErrorWithData(data interface{}) Error {
|
||||
return newCmnError(data)
|
||||
}
|
||||
|
||||
type cmnError struct {
|
||||
msg string // first msg which also appears in msg
|
||||
cause interface{} // underlying cause (or panic object)
|
||||
t interface{} // for switching on error
|
||||
data interface{} // associated data
|
||||
msgtraces []msgtraceItem // all messages traced
|
||||
stacktrace []uintptr // first stack trace
|
||||
}
|
||||
|
||||
var _ WithCauser = &cmnError{}
|
||||
var _ Error = &cmnError{}
|
||||
|
||||
// NOTE: do not expose.
|
||||
func newError(msg string, cause interface{}, t interface{}) *cmnError {
|
||||
func newCmnError(data interface{}) *cmnError {
|
||||
return &cmnError{
|
||||
msg: msg,
|
||||
cause: cause,
|
||||
t: t,
|
||||
data: data,
|
||||
msgtraces: nil,
|
||||
stacktrace: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (err *cmnError) Message() string {
|
||||
return err.msg
|
||||
}
|
||||
|
||||
// Implements error.
|
||||
func (err *cmnError) Error() string {
|
||||
return fmt.Sprintf("%v", err)
|
||||
}
|
||||
|
@ -121,42 +96,17 @@ func (err *cmnError) Stacktrace() Error {
|
|||
}
|
||||
|
||||
// Add tracing information with msg.
|
||||
func (err *cmnError) Trace(format string, args ...interface{}) Error {
|
||||
msg := Fmt(format, args...)
|
||||
return err.doTrace(msg, 0)
|
||||
}
|
||||
|
||||
// Same as Trace, but traces the line `offset` calls out.
|
||||
// If n == 0, the behavior is identical to Trace().
|
||||
func (err *cmnError) TraceFrom(offset int, format string, args ...interface{}) Error {
|
||||
// Set n=0 unless wrapped with some function, then n > 0.
|
||||
func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error {
|
||||
msg := Fmt(format, args...)
|
||||
return err.doTrace(msg, offset)
|
||||
}
|
||||
|
||||
// Return last known cause.
|
||||
// NOTE: The meaning of "cause" is left for the caller to define.
|
||||
// There exists no "canonical" definition of "cause".
|
||||
// Instead of blaming, try to handle it, or organize it.
|
||||
func (err *cmnError) Cause() interface{} {
|
||||
return err.cause
|
||||
}
|
||||
|
||||
// Overwrites the Error's cause.
|
||||
func (err *cmnError) WithCause(cause interface{}) Error {
|
||||
err.cause = cause
|
||||
return err
|
||||
}
|
||||
|
||||
// Overwrites the Error's type.
|
||||
func (err *cmnError) WithT(t interface{}) Error {
|
||||
err.t = t
|
||||
return err
|
||||
}
|
||||
|
||||
// Return the "type" of this message, primarily for switching
|
||||
// to handle this Error.
|
||||
func (err *cmnError) T() interface{} {
|
||||
return err.t
|
||||
// Return the "data" of this error.
|
||||
// Data could be used for error handling/switching,
|
||||
// or for holding general error/debug information.
|
||||
func (err *cmnError) Data() interface{} {
|
||||
return err.data
|
||||
}
|
||||
|
||||
func (err *cmnError) doTrace(msg string, n int) Error {
|
||||
|
@ -177,12 +127,8 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
|
|||
default:
|
||||
if s.Flag('#') {
|
||||
s.Write([]byte("--= Error =--\n"))
|
||||
// Write msg.
|
||||
s.Write([]byte(fmt.Sprintf("Message: %s\n", err.msg)))
|
||||
// Write cause.
|
||||
s.Write([]byte(fmt.Sprintf("Cause: %#v\n", err.cause)))
|
||||
// Write type.
|
||||
s.Write([]byte(fmt.Sprintf("T: %#v\n", err.t)))
|
||||
// Write data.
|
||||
s.Write([]byte(fmt.Sprintf("Data: %#v\n", err.data)))
|
||||
// Write msg trace items.
|
||||
s.Write([]byte(fmt.Sprintf("Msg Traces:\n")))
|
||||
for i, msgtrace := range err.msgtraces {
|
||||
|
@ -200,11 +146,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) {
|
|||
s.Write([]byte("--= /Error =--\n"))
|
||||
} else {
|
||||
// Write msg.
|
||||
if err.cause != nil {
|
||||
s.Write([]byte(fmt.Sprintf("Error{`%s` (cause: %v)}", err.msg, err.cause))) // TODO tick-esc?
|
||||
} else {
|
||||
s.Write([]byte(fmt.Sprintf("Error{`%s`}", err.msg))) // TODO tick-esc?
|
||||
}
|
||||
s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc?
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -232,6 +174,45 @@ func (mti msgtraceItem) String() string {
|
|||
)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// fmt error
|
||||
|
||||
/*
|
||||
|
||||
FmtError is the data type for NewError() (e.g. NewError().Data().(FmtError))
|
||||
Theoretically it could be used to switch on the format string.
|
||||
|
||||
```go
|
||||
// Error construction
|
||||
var err1 error = NewError("invalid username %v", "BOB")
|
||||
var err2 error = NewError("another kind of error")
|
||||
...
|
||||
// Error handling
|
||||
switch err1.Data().(cmn.FmtError).Format() {
|
||||
case "invalid username %v": ...
|
||||
case "another kind of error": ...
|
||||
default: ...
|
||||
}
|
||||
```
|
||||
*/
|
||||
type FmtError struct {
|
||||
format string
|
||||
args []interface{}
|
||||
}
|
||||
|
||||
func (fe FmtError) Error() string {
|
||||
return fmt.Sprintf(fe.format, fe.args...)
|
||||
}
|
||||
|
||||
func (fe FmtError) String() string {
|
||||
return fmt.Sprintf("FmtError{format:%v,args:%v}",
|
||||
fe.format, fe.args)
|
||||
}
|
||||
|
||||
func (fe FmtError) Format() string {
|
||||
return fe.format
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Panic wrappers
|
||||
// XXX DEPRECATED
|
||||
|
|
|
@ -25,11 +25,9 @@ func TestErrorPanic(t *testing.T) {
|
|||
|
||||
var err = capturePanic()
|
||||
|
||||
assert.Equal(t, pnk{"something"}, err.Cause())
|
||||
assert.Equal(t, pnk{"something"}, err.T())
|
||||
assert.Equal(t, "This is the message in ErrorWrap(r, message).", err.Message())
|
||||
assert.Equal(t, "Error{`This is the message in ErrorWrap(r, message).` (cause: {something})}", fmt.Sprintf("%v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), "Message: This is the message in ErrorWrap(r, message).")
|
||||
assert.Equal(t, pnk{"something"}, err.Data())
|
||||
assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).")
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
|
||||
}
|
||||
|
||||
|
@ -37,11 +35,9 @@ func TestErrorWrapSomething(t *testing.T) {
|
|||
|
||||
var err = ErrorWrap("something", "formatter%v%v", 0, 1)
|
||||
|
||||
assert.Equal(t, "something", err.Cause())
|
||||
assert.Equal(t, "something", err.T())
|
||||
assert.Equal(t, "formatter01", err.Message())
|
||||
assert.Equal(t, "Error{`formatter01` (cause: something)}", fmt.Sprintf("%v", err))
|
||||
assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err))
|
||||
assert.Equal(t, "something", err.Data())
|
||||
assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err))
|
||||
assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
|
||||
}
|
||||
|
||||
|
@ -49,11 +45,11 @@ func TestErrorWrapNothing(t *testing.T) {
|
|||
|
||||
var err = ErrorWrap(nil, "formatter%v%v", 0, 1)
|
||||
|
||||
assert.Equal(t, nil, err.Cause())
|
||||
assert.Equal(t, nil, err.T())
|
||||
assert.Equal(t, "formatter01", err.Message())
|
||||
assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err))
|
||||
assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err))
|
||||
assert.Equal(t,
|
||||
FmtError{"formatter%v%v", []interface{}{0, 1}},
|
||||
err.Data())
|
||||
assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
|
||||
}
|
||||
|
||||
|
@ -61,11 +57,11 @@ func TestErrorNewError(t *testing.T) {
|
|||
|
||||
var err = NewError("formatter%v%v", 0, 1)
|
||||
|
||||
assert.Equal(t, nil, err.Cause())
|
||||
assert.Equal(t, "formatter%v%v", err.T())
|
||||
assert.Equal(t, "formatter01", err.Message())
|
||||
assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err))
|
||||
assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err))
|
||||
assert.Equal(t,
|
||||
FmtError{"formatter%v%v", []interface{}{0, 1}},
|
||||
err.Data())
|
||||
assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
|
||||
assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace")
|
||||
}
|
||||
|
||||
|
@ -73,26 +69,26 @@ func TestErrorNewErrorWithStacktrace(t *testing.T) {
|
|||
|
||||
var err = NewError("formatter%v%v", 0, 1).Stacktrace()
|
||||
|
||||
assert.Equal(t, nil, err.Cause())
|
||||
assert.Equal(t, "formatter%v%v", err.T())
|
||||
assert.Equal(t, "formatter01", err.Message())
|
||||
assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err))
|
||||
assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err))
|
||||
assert.Equal(t,
|
||||
FmtError{"formatter%v%v", []interface{}{0, 1}},
|
||||
err.Data())
|
||||
assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0")
|
||||
}
|
||||
|
||||
func TestErrorNewErrorWithTrace(t *testing.T) {
|
||||
|
||||
var err = NewError("formatter%v%v", 0, 1)
|
||||
err.Trace("trace %v", 1)
|
||||
err.Trace("trace %v", 2)
|
||||
err.Trace("trace %v", 3)
|
||||
err.Trace(0, "trace %v", 1)
|
||||
err.Trace(0, "trace %v", 2)
|
||||
err.Trace(0, "trace %v", 3)
|
||||
|
||||
assert.Equal(t, nil, err.Cause())
|
||||
assert.Equal(t, "formatter%v%v", err.T())
|
||||
assert.Equal(t, "formatter01", err.Message())
|
||||
assert.Equal(t, "Error{`formatter01`}", fmt.Sprintf("%v", err))
|
||||
assert.Regexp(t, `Message: formatter01\n`, fmt.Sprintf("%#v", err))
|
||||
assert.Equal(t,
|
||||
FmtError{"formatter%v%v", []interface{}{0, 1}},
|
||||
err.Data())
|
||||
assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err))
|
||||
assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`)
|
||||
dump := fmt.Sprintf("%#v", err)
|
||||
assert.NotContains(t, dump, "Stack Trace")
|
||||
assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 1`, dump)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: common/types.proto
|
||||
|
||||
/*
|
||||
|
@ -14,10 +14,9 @@ It has these top-level messages:
|
|||
//nolint: gas
|
||||
package common
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
|
@ -28,7 +27,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Define these here for compatibility but use tmlibs/common.KVPair.
|
||||
type KVPair struct {
|
||||
|
@ -39,7 +38,7 @@ type KVPair struct {
|
|||
func (m *KVPair) Reset() { *m = KVPair{} }
|
||||
func (m *KVPair) String() string { return proto.CompactTextString(m) }
|
||||
func (*KVPair) ProtoMessage() {}
|
||||
func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
|
||||
func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *KVPair) GetKey() []byte {
|
||||
if m != nil {
|
||||
|
@ -58,13 +57,13 @@ func (m *KVPair) GetValue() []byte {
|
|||
// Define these here for compatibility but use tmlibs/common.KI64Pair.
|
||||
type KI64Pair struct {
|
||||
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Value int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *KI64Pair) Reset() { *m = KI64Pair{} }
|
||||
func (m *KI64Pair) String() string { return proto.CompactTextString(m) }
|
||||
func (*KI64Pair) ProtoMessage() {}
|
||||
func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
|
||||
func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *KI64Pair) GetKey() []byte {
|
||||
if m != nil {
|
||||
|
@ -85,17 +84,15 @@ func init() {
|
|||
proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) }
|
||||
func init() { proto.RegisterFile("common/types.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptorTypes = []byte{
|
||||
// 137 bytes of a gzipped FileDescriptorProto
|
||||
var fileDescriptor0 = []byte{
|
||||
// 107 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd,
|
||||
0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62,
|
||||
0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7,
|
||||
0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68,
|
||||
0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e,
|
||||
0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12,
|
||||
0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99,
|
||||
0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff,
|
||||
0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00,
|
||||
0x83, 0x88, 0x29, 0x19, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x09, 0x70, 0x31,
|
||||
0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac,
|
||||
0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0x47, 0xc9, 0x88, 0x8b, 0xc3, 0xdb,
|
||||
0xd3, 0xcc, 0x84, 0x18, 0x3d, 0xcc, 0x50, 0x3d, 0x49, 0x6c, 0x60, 0x4b, 0x8d, 0x01, 0x01, 0x00,
|
||||
0x00, 0xff, 0xff, 0xd8, 0xf1, 0xc3, 0x8c, 0x8a, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -1,13 +1,6 @@
|
|||
syntax = "proto3";
|
||||
package common;
|
||||
|
||||
// For more information on gogo.proto, see:
|
||||
// https://github.com/gogo/protobuf/blob/master/extensions.md
|
||||
// NOTE: Try really hard not to use custom types,
|
||||
// it's often complicated, broken, nor not worth it.
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
|
||||
//----------------------------------------
|
||||
// Abstract types
|
||||
|
||||
|
|
|
@ -149,3 +149,67 @@ func TestGoLevelDBBackend(t *testing.T) {
|
|||
_, ok := db.(*GoLevelDB)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestDBIterator(t *testing.T) {
|
||||
for dbType := range backends {
|
||||
t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) {
|
||||
testDBIterator(t, dbType)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testDBIterator(t *testing.T, backend DBBackendType) {
|
||||
name := cmn.Fmt("test_%x", cmn.RandStr(12))
|
||||
db := NewDB(name, backend, "")
|
||||
defer cleanupDBDir("", name)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
if i != 6 { // but skip 6.
|
||||
db.Set(int642Bytes(int64(i)), nil)
|
||||
}
|
||||
}
|
||||
|
||||
verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")
|
||||
verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")
|
||||
|
||||
verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64(nil), "reverse iterator 10")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(9), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(8), nil), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8")
|
||||
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(4)), []int64{5}, "reverse iterator from 5 to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(4)), []int64{5}, "reverse iterator from 6 to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(4)), []int64{7, 5}, "reverse iterator from 7 to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(5)), []int64(nil), "reverse iterator from 6 to 5")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(5)), []int64{7}, "reverse iterator from 7 to 5")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(6)), []int64{7}, "reverse iterator from 7 to 6")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(9), int642Bytes(8)), []int64{9}, "reverse iterator from 9 to 8")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64{4, 3}, "reverse iterator from 4 to 2")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64(nil), "reverse iterator from 2 to 4")
|
||||
|
||||
}
|
||||
|
||||
func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) {
|
||||
var list []int64
|
||||
for itr.Valid() {
|
||||
list = append(list, bytes2Int64(itr.Key()))
|
||||
itr.Next()
|
||||
}
|
||||
assert.Equal(t, expected, list, msg)
|
||||
}
|
||||
|
|
|
@ -190,7 +190,8 @@ func (db *CLevelDB) Iterator(start, end []byte) Iterator {
|
|||
}
|
||||
|
||||
func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
|
||||
panic("not implemented yet") // XXX
|
||||
itr := db.db.NewIterator(db.ro)
|
||||
return newCLevelDBIterator(itr, start, end, true)
|
||||
}
|
||||
|
||||
var _ Iterator = (*cLevelDBIterator)(nil)
|
||||
|
@ -204,12 +205,25 @@ type cLevelDBIterator struct {
|
|||
|
||||
func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
|
||||
if isReverse {
|
||||
panic("not implemented yet") // XXX
|
||||
}
|
||||
if start != nil {
|
||||
source.Seek(start)
|
||||
if start == nil {
|
||||
source.SeekToLast()
|
||||
} else {
|
||||
source.Seek(start)
|
||||
if source.Valid() {
|
||||
soakey := source.Key() // start or after key
|
||||
if bytes.Compare(start, soakey) < 0 {
|
||||
source.Prev()
|
||||
}
|
||||
} else {
|
||||
source.SeekToLast()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
source.SeekToFirst()
|
||||
if start == nil {
|
||||
source.SeekToFirst()
|
||||
} else {
|
||||
source.Seek(start)
|
||||
}
|
||||
}
|
||||
return &cLevelDBIterator{
|
||||
source: source,
|
||||
|
@ -243,9 +257,16 @@ func (itr cLevelDBIterator) Valid() bool {
|
|||
// If key is end or past it, invalid.
|
||||
var end = itr.end
|
||||
var key = itr.source.Key()
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
if itr.isReverse {
|
||||
if end != nil && bytes.Compare(key, end) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// It's valid.
|
||||
|
@ -267,7 +288,11 @@ func (itr cLevelDBIterator) Value() []byte {
|
|||
func (itr cLevelDBIterator) Next() {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
itr.source.Next()
|
||||
if itr.isReverse {
|
||||
itr.source.Prev()
|
||||
} else {
|
||||
itr.source.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Close() {
|
||||
|
|
|
@ -33,7 +33,9 @@ func (ddb debugDB) Mutex() *sync.Mutex { return nil }
|
|||
// Implements DB.
|
||||
func (ddb debugDB) Get(key []byte) (value []byte) {
|
||||
defer func() {
|
||||
fmt.Printf("%v.Get(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Blue(_fmt("%X", value)))
|
||||
fmt.Printf("%v.Get(%v) %v\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue),
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
}()
|
||||
value = ddb.db.Get(key)
|
||||
return
|
||||
|
@ -42,68 +44,85 @@ func (ddb debugDB) Get(key []byte) (value []byte) {
|
|||
// Implements DB.
|
||||
func (ddb debugDB) Has(key []byte) (has bool) {
|
||||
defer func() {
|
||||
fmt.Printf("%v.Has(%v) %v\n", ddb.label, cmn.Cyan(_fmt("%X", key)), has)
|
||||
fmt.Printf("%v.Has(%v) %v\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has)
|
||||
}()
|
||||
return ddb.db.Has(key)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (ddb debugDB) Set(key []byte, value []byte) {
|
||||
fmt.Printf("%v.Set(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
|
||||
fmt.Printf("%v.Set(%v, %v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
ddb.db.Set(key, value)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (ddb debugDB) SetSync(key []byte, value []byte) {
|
||||
fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
|
||||
fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
ddb.db.SetSync(key, value)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (ddb debugDB) SetNoLock(key []byte, value []byte) {
|
||||
fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
|
||||
fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
ddb.db.(atomicSetDeleter).SetNoLock(key, value)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (ddb debugDB) SetNoLockSync(key []byte, value []byte) {
|
||||
fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
|
||||
fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
ddb.db.(atomicSetDeleter).SetNoLockSync(key, value)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (ddb debugDB) Delete(key []byte) {
|
||||
fmt.Printf("%v.Delete(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
|
||||
fmt.Printf("%v.Delete(%v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
|
||||
ddb.db.Delete(key)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (ddb debugDB) DeleteSync(key []byte) {
|
||||
fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
|
||||
fmt.Printf("%v.DeleteSync(%v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
|
||||
ddb.db.DeleteSync(key)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (ddb debugDB) DeleteNoLock(key []byte) {
|
||||
fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
|
||||
fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
|
||||
ddb.db.(atomicSetDeleter).DeleteNoLock(key)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (ddb debugDB) DeleteNoLockSync(key []byte) {
|
||||
fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, cmn.Red(_fmt("%X", key)))
|
||||
fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label,
|
||||
cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
|
||||
ddb.db.(atomicSetDeleter).DeleteNoLockSync(key)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (ddb debugDB) Iterator(start, end []byte) Iterator {
|
||||
fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end)))
|
||||
fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label,
|
||||
cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
|
||||
cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
|
||||
return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end))
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (ddb debugDB) ReverseIterator(start, end []byte) Iterator {
|
||||
fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, cmn.Cyan(_fmt("%X", start)), cmn.Blue(_fmt("%X", end)))
|
||||
fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label,
|
||||
cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue),
|
||||
cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue))
|
||||
return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end))
|
||||
}
|
||||
|
||||
|
@ -173,15 +192,17 @@ func (ditr debugIterator) Next() {
|
|||
|
||||
// Implements Iterator.
|
||||
func (ditr debugIterator) Key() (key []byte) {
|
||||
fmt.Printf("%v.itr.Key() %v\n", ditr.label, cmn.Cyan(_fmt("%X", key)))
|
||||
key = ditr.itr.Key()
|
||||
fmt.Printf("%v.itr.Key() %v\n", ditr.label,
|
||||
cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue))
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (ditr debugIterator) Value() (value []byte) {
|
||||
fmt.Printf("%v.itr.Value() %v\n", ditr.label, cmn.Blue(_fmt("%X", value)))
|
||||
value = ditr.itr.Value()
|
||||
fmt.Printf("%v.itr.Value() %v\n", ditr.label,
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -209,13 +230,16 @@ func NewDebugBatch(label string, bch Batch) debugBatch {
|
|||
|
||||
// Implements Batch.
|
||||
func (dbch debugBatch) Set(key, value []byte) {
|
||||
fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, cmn.Cyan(_fmt("%X", key)), cmn.Yellow(_fmt("%X", value)))
|
||||
fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label,
|
||||
cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue),
|
||||
cmn.ColoredBytes(value, cmn.Green, cmn.Blue))
|
||||
dbch.bch.Set(key, value)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (dbch debugBatch) Delete(key []byte) {
|
||||
fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, cmn.Red(_fmt("%X", key)))
|
||||
fmt.Printf("%v.batch.Delete(%v)\n", dbch.label,
|
||||
cmn.ColoredBytes(key, cmn.Red, cmn.Yellow))
|
||||
dbch.bch.Delete(key)
|
||||
}
|
||||
|
||||
|
|
18
db/fsdb.go
18
db/fsdb.go
|
@ -151,21 +151,29 @@ func (db *FSDB) Mutex() *sync.Mutex {
|
|||
}
|
||||
|
||||
func (db *FSDB) Iterator(start, end []byte) Iterator {
|
||||
return db.MakeIterator(start, end, false)
|
||||
}
|
||||
|
||||
func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
// We need a copy of all of the keys.
|
||||
// Not the best, but probably not a bottleneck depending.
|
||||
keys, err := list(db.dir, start, end)
|
||||
keys, err := list(db.dir, start, end, isReversed)
|
||||
if err != nil {
|
||||
panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
if isReversed {
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(keys)))
|
||||
} else {
|
||||
sort.Strings(keys)
|
||||
}
|
||||
return newMemDBIterator(db, keys, start, end)
|
||||
}
|
||||
|
||||
func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
|
||||
panic("not implemented yet") // XXX
|
||||
return db.MakeIterator(start, end, true)
|
||||
}
|
||||
|
||||
func (db *FSDB) nameToPath(name []byte) string {
|
||||
|
@ -213,7 +221,7 @@ func remove(path string) error {
|
|||
|
||||
// List keys in a directory, stripping of escape sequences and dir portions.
|
||||
// CONTRACT: returns os errors directly without wrapping.
|
||||
func list(dirPath string, start, end []byte) ([]string, error) {
|
||||
func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) {
|
||||
dir, err := os.Open(dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -231,7 +239,7 @@ func list(dirPath string, start, end []byte) ([]string, error) {
|
|||
return nil, fmt.Errorf("Failed to unescape %s while listing", name)
|
||||
}
|
||||
key := unescapeKey([]byte(n))
|
||||
if IsKeyInDomain(key, start, end, false) {
|
||||
if IsKeyInDomain(key, start, end, isReversed) {
|
||||
keys = append(keys, string(key))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -193,7 +193,8 @@ func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
|
|||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
|
||||
panic("not implemented yet") // XXX
|
||||
itr := db.db.NewIterator(nil, nil)
|
||||
return newGoLevelDBIterator(itr, start, end, true)
|
||||
}
|
||||
|
||||
type goLevelDBIterator struct {
|
||||
|
@ -208,9 +209,26 @@ var _ Iterator = (*goLevelDBIterator)(nil)
|
|||
|
||||
func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
|
||||
if isReverse {
|
||||
panic("not implemented yet") // XXX
|
||||
if start == nil {
|
||||
source.Last()
|
||||
} else {
|
||||
valid := source.Seek(start)
|
||||
if valid {
|
||||
soakey := source.Key() // start or after key
|
||||
if bytes.Compare(start, soakey) < 0 {
|
||||
source.Prev()
|
||||
}
|
||||
} else {
|
||||
source.Last()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
source.First()
|
||||
} else {
|
||||
source.Seek(start)
|
||||
}
|
||||
}
|
||||
source.Seek(start)
|
||||
return &goLevelDBIterator{
|
||||
source: source,
|
||||
start: start,
|
||||
|
@ -245,9 +263,17 @@ func (itr *goLevelDBIterator) Valid() bool {
|
|||
// If key is end or past it, invalid.
|
||||
var end = itr.end
|
||||
var key = itr.source.Key()
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
|
||||
if itr.isReverse {
|
||||
if end != nil && bytes.Compare(key, end) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Valid
|
||||
|
@ -276,7 +302,11 @@ func (itr *goLevelDBIterator) Value() []byte {
|
|||
func (itr *goLevelDBIterator) Next() {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
itr.source.Next()
|
||||
if itr.isReverse {
|
||||
itr.source.Prev()
|
||||
} else {
|
||||
itr.source.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
remotedb is a package for connecting to distributed Tendermint db.DB
|
||||
instances. The purpose is to detach difficult deployments such as
|
||||
CLevelDB that requires gcc or perhaps for databases that require
|
||||
custom configurations such as extra disk space. It also eases
|
||||
the burden and cost of deployment of dependencies for databases
|
||||
to be used by Tendermint developers. Most importantly it is built
|
||||
over the high performant gRPC transport.
|
||||
|
||||
remotedb's RemoteDB implements db.DB so can be used normally
|
||||
like other databases. One just has to explicitly connect to the
|
||||
remote database with a client setup such as:
|
||||
|
||||
client, err := remotedb.NewInsecure(addr)
|
||||
// Make sure to invoke InitRemote!
|
||||
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
|
||||
log.Fatalf("Failed to initialize the remote db")
|
||||
}
|
||||
|
||||
client.Set(key1, value)
|
||||
gv1 := client.SetSync(k2, v2)
|
||||
|
||||
client.Delete(k1)
|
||||
gv2 := client.Get(k1)
|
||||
|
||||
for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() {
|
||||
ik, iv := itr.Key(), itr.Value()
|
||||
ds, de := itr.Domain()
|
||||
}
|
||||
|
||||
stats := client.Stats()
|
||||
|
||||
if !client.Has(dk1) {
|
||||
client.SetSync(dk1, dv1)
|
||||
}
|
||||
*/
|
||||
package remotedb
|
|
@ -0,0 +1,30 @@
|
|||
package grpcdb
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
protodb "github.com/tendermint/tmlibs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
// Security defines how the client will talk to the gRPC server.
|
||||
type Security uint
|
||||
|
||||
const (
|
||||
Insecure Security = iota
|
||||
Secure
|
||||
)
|
||||
|
||||
// NewClient creates a gRPC client connected to the bound gRPC server at serverAddr.
|
||||
// Use kind to set the level of security to either Secure or Insecure.
|
||||
func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {
|
||||
creds, err := credentials.NewClientTLSFromFile(serverCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return protodb.NewDBClient(cc), nil
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
/*
|
||||
grpcdb is the distribution of Tendermint's db.DB instances using
|
||||
the gRPC transport to decouple local db.DB usages from applications,
|
||||
to using them over a network in a highly performant manner.
|
||||
|
||||
grpcdb allows users to initialize a database's server like
|
||||
they would locally and invoke the respective methods of db.DB.
|
||||
|
||||
Most users shouldn't use this package, but should instead use
|
||||
remotedb. Only the lower level users and database server deployers
|
||||
should use it, for functionality such as:
|
||||
|
||||
ln, err := net.Listen("tcp", "0.0.0.0:0")
|
||||
srv := grpcdb.NewServer()
|
||||
defer srv.Stop()
|
||||
go func() {
|
||||
if err := srv.Serve(ln); err != nil {
|
||||
t.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
or
|
||||
addr := ":8998"
|
||||
cert := "server.crt"
|
||||
key := "server.key"
|
||||
go func() {
|
||||
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
|
||||
log.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
*/
|
||||
package grpcdb
|
|
@ -0,0 +1,52 @@
|
|||
package grpcdb_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"log"
|
||||
|
||||
grpcdb "github.com/tendermint/tmlibs/db/remotedb/grpcdb"
|
||||
protodb "github.com/tendermint/tmlibs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
addr := ":8998"
|
||||
cert := "server.crt"
|
||||
key := "server.key"
|
||||
go func() {
|
||||
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
|
||||
log.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
client, err := grpcdb.NewClient(addr, cert)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create grpcDB client: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// 1. Initialize the DB
|
||||
in := &protodb.Init{
|
||||
Type: "leveldb",
|
||||
Name: "grpc-uno-test",
|
||||
Dir: ".",
|
||||
}
|
||||
if _, err := client.Init(ctx, in); err != nil {
|
||||
log.Fatalf("Init error: %v", err)
|
||||
}
|
||||
|
||||
// 2. Now it can be used!
|
||||
query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")}
|
||||
if _, err := client.SetSync(ctx, query1); err != nil {
|
||||
log.Fatalf("SetSync err: %v", err)
|
||||
}
|
||||
|
||||
query2 := &protodb.Entity{Key: []byte("Project")}
|
||||
read, err := client.Get(ctx, query2)
|
||||
if err != nil {
|
||||
log.Fatalf("Get err: %v", err)
|
||||
}
|
||||
if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) {
|
||||
log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,197 @@
|
|||
package grpcdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"github.com/tendermint/tmlibs/db"
|
||||
protodb "github.com/tendermint/tmlibs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
// ListenAndServe is a blocking function that sets up a gRPC based
|
||||
// server at the address supplied, with the gRPC options passed in.
|
||||
// Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe.
|
||||
func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error {
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv, err := NewServer(cert, key, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.Serve(ln)
|
||||
}
|
||||
|
||||
func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) {
|
||||
creds, err := credentials.NewServerTLSFromFile(cert, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, grpc.Creds(creds))
|
||||
srv := grpc.NewServer(opts...)
|
||||
protodb.RegisterDBServer(srv, new(server))
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
type server struct {
|
||||
mu sync.Mutex
|
||||
db db.DB
|
||||
}
|
||||
|
||||
var _ protodb.DBServer = (*server)(nil)
|
||||
|
||||
// Init initializes the server's database. Only one type of database
|
||||
// can be initialized per server.
|
||||
//
|
||||
// Dir is the directory on the file system in which the DB will be stored(if backed by disk) (TODO: remove)
|
||||
//
|
||||
// Name is representative filesystem entry's basepath
|
||||
//
|
||||
// Type can be either one of:
|
||||
// * cleveldb (if built with gcc enabled)
|
||||
// * fsdb
|
||||
// * memdB
|
||||
// * leveldb
|
||||
// See https://godoc.org/github.com/tendermint/tmlibs/db#DBBackendType
|
||||
func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir)
|
||||
return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil
|
||||
}
|
||||
|
||||
func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.Delete(in.Key)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
var nothing = new(protodb.Nothing)
|
||||
|
||||
func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.DeleteSync(in.Key)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
|
||||
value := s.db.Get(in.Key)
|
||||
return &protodb.Entity{Value: value}, nil
|
||||
}
|
||||
|
||||
func (s *server) GetStream(ds protodb.DB_GetStreamServer) error {
|
||||
// Receive routine
|
||||
responsesChan := make(chan *protodb.Entity)
|
||||
go func() {
|
||||
defer close(responsesChan)
|
||||
ctx := context.Background()
|
||||
for {
|
||||
in, err := ds.Recv()
|
||||
if err != nil {
|
||||
responsesChan <- &protodb.Entity{Err: err.Error()}
|
||||
return
|
||||
}
|
||||
out, err := s.Get(ctx, in)
|
||||
if err != nil {
|
||||
if out == nil {
|
||||
out = new(protodb.Entity)
|
||||
out.Key = in.Key
|
||||
}
|
||||
out.Err = err.Error()
|
||||
responsesChan <- out
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise continue on
|
||||
responsesChan <- out
|
||||
}
|
||||
}()
|
||||
|
||||
// Send routine, block until we return
|
||||
for out := range responsesChan {
|
||||
if err := ds.Send(out); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
|
||||
exists := s.db.Has(in.Key)
|
||||
return &protodb.Entity{Exists: exists}, nil
|
||||
}
|
||||
|
||||
func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.Set(in.Key, in.Value)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.SetSync(in.Key, in.Value)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error {
|
||||
it := s.db.Iterator(query.Start, query.End)
|
||||
return s.handleIterator(it, dis.Send)
|
||||
}
|
||||
|
||||
func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error {
|
||||
for it.Valid() {
|
||||
start, end := it.Domain()
|
||||
out := &protodb.Iterator{
|
||||
Domain: &protodb.Domain{Start: start, End: end},
|
||||
Valid: it.Valid(),
|
||||
Key: it.Key(),
|
||||
Value: it.Value(),
|
||||
}
|
||||
if err := sendFunc(out); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Finally move the iterator forward
|
||||
it.Next()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error {
|
||||
it := s.db.ReverseIterator(query.Start, query.End)
|
||||
return s.handleIterator(it, dis.Send)
|
||||
}
|
||||
|
||||
func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) {
|
||||
stats := s.db.Stats()
|
||||
return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil
|
||||
}
|
||||
|
||||
func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
|
||||
return s.batchWrite(c, b, false)
|
||||
}
|
||||
|
||||
func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
|
||||
return s.batchWrite(c, b, true)
|
||||
}
|
||||
|
||||
func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) {
|
||||
bat := s.db.NewBatch()
|
||||
for _, op := range b.Ops {
|
||||
switch op.Type {
|
||||
case protodb.Operation_SET:
|
||||
bat.Set(op.Entity.Key, op.Entity.Value)
|
||||
case protodb.Operation_DELETE:
|
||||
bat.Delete(op.Entity.Key)
|
||||
}
|
||||
}
|
||||
if sync {
|
||||
bat.WriteSync()
|
||||
} else {
|
||||
bat.Write()
|
||||
}
|
||||
return nothing, nil
|
||||
}
|
|
@ -0,0 +1,914 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: defs.proto
|
||||
|
||||
/*
|
||||
Package protodb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
defs.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Batch
|
||||
Operation
|
||||
Entity
|
||||
Nothing
|
||||
Domain
|
||||
Iterator
|
||||
Stats
|
||||
Init
|
||||
*/
|
||||
package protodb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Operation_Type int32
|
||||
|
||||
const (
|
||||
Operation_SET Operation_Type = 0
|
||||
Operation_DELETE Operation_Type = 1
|
||||
)
|
||||
|
||||
var Operation_Type_name = map[int32]string{
|
||||
0: "SET",
|
||||
1: "DELETE",
|
||||
}
|
||||
var Operation_Type_value = map[string]int32{
|
||||
"SET": 0,
|
||||
"DELETE": 1,
|
||||
}
|
||||
|
||||
func (x Operation_Type) String() string {
|
||||
return proto.EnumName(Operation_Type_name, int32(x))
|
||||
}
|
||||
func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
|
||||
|
||||
type Batch struct {
|
||||
Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Batch) Reset() { *m = Batch{} }
|
||||
func (m *Batch) String() string { return proto.CompactTextString(m) }
|
||||
func (*Batch) ProtoMessage() {}
|
||||
func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Batch) GetOps() []*Operation {
|
||||
if m != nil {
|
||||
return m.Ops
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Operation struct {
|
||||
Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"`
|
||||
Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Operation) Reset() { *m = Operation{} }
|
||||
func (m *Operation) String() string { return proto.CompactTextString(m) }
|
||||
func (*Operation) ProtoMessage() {}
|
||||
func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *Operation) GetEntity() *Entity {
|
||||
if m != nil {
|
||||
return m.Entity
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Operation) GetType() Operation_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return Operation_SET
|
||||
}
|
||||
|
||||
type Entity struct {
|
||||
Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
|
||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"`
|
||||
Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"`
|
||||
End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"`
|
||||
Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"`
|
||||
CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Entity) Reset() { *m = Entity{} }
|
||||
func (m *Entity) String() string { return proto.CompactTextString(m) }
|
||||
func (*Entity) ProtoMessage() {}
|
||||
func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *Entity) GetId() int32 {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Entity) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetExists() bool {
|
||||
if m != nil {
|
||||
return m.Exists
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Entity) GetStart() []byte {
|
||||
if m != nil {
|
||||
return m.Start
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetEnd() []byte {
|
||||
if m != nil {
|
||||
return m.End
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetErr() string {
|
||||
if m != nil {
|
||||
return m.Err
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Entity) GetCreatedAt() int64 {
|
||||
if m != nil {
|
||||
return m.CreatedAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Nothing struct {
|
||||
}
|
||||
|
||||
func (m *Nothing) Reset() { *m = Nothing{} }
|
||||
func (m *Nothing) String() string { return proto.CompactTextString(m) }
|
||||
func (*Nothing) ProtoMessage() {}
|
||||
func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
type Domain struct {
|
||||
Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"`
|
||||
End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Domain) Reset() { *m = Domain{} }
|
||||
func (m *Domain) String() string { return proto.CompactTextString(m) }
|
||||
func (*Domain) ProtoMessage() {}
|
||||
func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *Domain) GetStart() []byte {
|
||||
if m != nil {
|
||||
return m.Start
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Domain) GetEnd() []byte {
|
||||
if m != nil {
|
||||
return m.End
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Iterator struct {
|
||||
Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"`
|
||||
Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"`
|
||||
Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Iterator) Reset() { *m = Iterator{} }
|
||||
func (m *Iterator) String() string { return proto.CompactTextString(m) }
|
||||
func (*Iterator) ProtoMessage() {}
|
||||
func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
func (m *Iterator) GetDomain() *Domain {
|
||||
if m != nil {
|
||||
return m.Domain
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Iterator) GetValid() bool {
|
||||
if m != nil {
|
||||
return m.Valid
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Iterator) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Iterator) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Stats struct {
|
||||
Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Stats) Reset() { *m = Stats{} }
|
||||
func (m *Stats) String() string { return proto.CompactTextString(m) }
|
||||
func (*Stats) ProtoMessage() {}
|
||||
func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
func (m *Stats) GetData() map[string]string {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Stats) GetTimeAt() int64 {
|
||||
if m != nil {
|
||||
return m.TimeAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Init struct {
|
||||
Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"`
|
||||
Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Init) Reset() { *m = Init{} }
|
||||
func (m *Init) String() string { return proto.CompactTextString(m) }
|
||||
func (*Init) ProtoMessage() {}
|
||||
func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
func (m *Init) GetType() string {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Init) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Init) GetDir() string {
|
||||
if m != nil {
|
||||
return m.Dir
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Batch)(nil), "protodb.Batch")
|
||||
proto.RegisterType((*Operation)(nil), "protodb.Operation")
|
||||
proto.RegisterType((*Entity)(nil), "protodb.Entity")
|
||||
proto.RegisterType((*Nothing)(nil), "protodb.Nothing")
|
||||
proto.RegisterType((*Domain)(nil), "protodb.Domain")
|
||||
proto.RegisterType((*Iterator)(nil), "protodb.Iterator")
|
||||
proto.RegisterType((*Stats)(nil), "protodb.Stats")
|
||||
proto.RegisterType((*Init)(nil), "protodb.Init")
|
||||
proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value)
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for DB service
|
||||
|
||||
type DBClient interface {
|
||||
Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error)
|
||||
Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error)
|
||||
GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error)
|
||||
Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error)
|
||||
Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error)
|
||||
ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error)
|
||||
// rpc print(Nothing) returns (Entity) {}
|
||||
Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error)
|
||||
BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error)
|
||||
BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error)
|
||||
}
|
||||
|
||||
type dBClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewDBClient(cc *grpc.ClientConn) DBClient {
|
||||
return &dBClient{cc}
|
||||
}
|
||||
|
||||
func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) {
|
||||
out := new(Entity)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) {
|
||||
out := new(Entity)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &dBGetStreamClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DB_GetStreamClient interface {
|
||||
Send(*Entity) error
|
||||
Recv() (*Entity, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type dBGetStreamClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *dBGetStreamClient) Send(m *Entity) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *dBGetStreamClient) Recv() (*Entity, error) {
|
||||
m := new(Entity)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) {
|
||||
out := new(Entity)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &dBIteratorClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DB_IteratorClient interface {
|
||||
Recv() (*Iterator, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type dBIteratorClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *dBIteratorClient) Recv() (*Iterator, error) {
|
||||
m := new(Iterator)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &dBReverseIteratorClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DB_ReverseIteratorClient interface {
|
||||
Recv() (*Iterator, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type dBReverseIteratorClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *dBReverseIteratorClient) Recv() (*Iterator, error) {
|
||||
m := new(Iterator)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) {
|
||||
out := new(Stats)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for DB service
|
||||
|
||||
type DBServer interface {
|
||||
Init(context.Context, *Init) (*Entity, error)
|
||||
Get(context.Context, *Entity) (*Entity, error)
|
||||
GetStream(DB_GetStreamServer) error
|
||||
Has(context.Context, *Entity) (*Entity, error)
|
||||
Set(context.Context, *Entity) (*Nothing, error)
|
||||
SetSync(context.Context, *Entity) (*Nothing, error)
|
||||
Delete(context.Context, *Entity) (*Nothing, error)
|
||||
DeleteSync(context.Context, *Entity) (*Nothing, error)
|
||||
Iterator(*Entity, DB_IteratorServer) error
|
||||
ReverseIterator(*Entity, DB_ReverseIteratorServer) error
|
||||
// rpc print(Nothing) returns (Entity) {}
|
||||
Stats(context.Context, *Nothing) (*Stats, error)
|
||||
BatchWrite(context.Context, *Batch) (*Nothing, error)
|
||||
BatchWriteSync(context.Context, *Batch) (*Nothing, error)
|
||||
}
|
||||
|
||||
func RegisterDBServer(s *grpc.Server, srv DBServer) {
|
||||
s.RegisterService(&_DB_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Init)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Init(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Init",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Init(ctx, req.(*Init))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Get(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Get",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Get(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(DBServer).GetStream(&dBGetStreamServer{stream})
|
||||
}
|
||||
|
||||
type DB_GetStreamServer interface {
|
||||
Send(*Entity) error
|
||||
Recv() (*Entity, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type dBGetStreamServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *dBGetStreamServer) Send(m *Entity) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *dBGetStreamServer) Recv() (*Entity, error) {
|
||||
m := new(Entity)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Has(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Has",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Has(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Set(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Set",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Set(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).SetSync(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/SetSync",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).SetSync(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Delete(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Delete",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Delete(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).DeleteSync(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/DeleteSync",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).DeleteSync(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(Entity)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(DBServer).Iterator(m, &dBIteratorServer{stream})
|
||||
}
|
||||
|
||||
type DB_IteratorServer interface {
|
||||
Send(*Iterator) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type dBIteratorServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *dBIteratorServer) Send(m *Iterator) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(Entity)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream})
|
||||
}
|
||||
|
||||
type DB_ReverseIteratorServer interface {
|
||||
Send(*Iterator) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type dBReverseIteratorServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *dBReverseIteratorServer) Send(m *Iterator) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Nothing)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Stats(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Stats",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Stats(ctx, req.(*Nothing))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Batch)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).BatchWrite(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/BatchWrite",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).BatchWrite(ctx, req.(*Batch))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Batch)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).BatchWriteSync(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/BatchWriteSync",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _DB_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "protodb.DB",
|
||||
HandlerType: (*DBServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "init",
|
||||
Handler: _DB_Init_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "get",
|
||||
Handler: _DB_Get_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "has",
|
||||
Handler: _DB_Has_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "set",
|
||||
Handler: _DB_Set_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "setSync",
|
||||
Handler: _DB_SetSync_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "delete",
|
||||
Handler: _DB_Delete_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "deleteSync",
|
||||
Handler: _DB_DeleteSync_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "stats",
|
||||
Handler: _DB_Stats_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "batchWrite",
|
||||
Handler: _DB_BatchWrite_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "batchWriteSync",
|
||||
Handler: _DB_BatchWriteSync_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "getStream",
|
||||
Handler: _DB_GetStream_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "iterator",
|
||||
Handler: _DB_Iterator_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "reverseIterator",
|
||||
Handler: _DB_ReverseIterator_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "defs.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("defs.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 606 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
|
||||
0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b,
|
||||
0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95,
|
||||
0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8,
|
||||
0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 0xde, 0x7b, 0xb3, 0xf3,
|
||||
0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2,
|
||||
0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef,
|
||||
0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01,
|
||||
0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b,
|
||||
0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b,
|
||||
0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6,
|
||||
0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44,
|
||||
0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17,
|
||||
0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3,
|
||||
0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7,
|
||||
0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43,
|
||||
0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08,
|
||||
0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e,
|
||||
0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a,
|
||||
0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b,
|
||||
0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b,
|
||||
0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58,
|
||||
0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92,
|
||||
0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a,
|
||||
0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e,
|
||||
0xe1, 0xd8, 0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72,
|
||||
0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a,
|
||||
0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0,
|
||||
0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde,
|
||||
0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03,
|
||||
0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72,
|
||||
0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9,
|
||||
0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c,
|
||||
0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 0xd8, 0xd5, 0x74,
|
||||
0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7,
|
||||
0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb,
|
||||
0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75,
|
||||
0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00,
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package protodb;
|
||||
|
||||
message Batch {
|
||||
repeated Operation ops = 1;
|
||||
}
|
||||
|
||||
message Operation {
|
||||
Entity entity = 1;
|
||||
enum Type {
|
||||
SET = 0;
|
||||
DELETE = 1;
|
||||
}
|
||||
Type type = 2;
|
||||
}
|
||||
|
||||
message Entity {
|
||||
int32 id = 1;
|
||||
bytes key = 2;
|
||||
bytes value = 3;
|
||||
bool exists = 4;
|
||||
bytes start = 5;
|
||||
bytes end = 6;
|
||||
string err = 7;
|
||||
int64 created_at = 8;
|
||||
}
|
||||
|
||||
message Nothing {
|
||||
}
|
||||
|
||||
message Domain {
|
||||
bytes start = 1;
|
||||
bytes end = 2;
|
||||
}
|
||||
|
||||
message Iterator {
|
||||
Domain domain = 1;
|
||||
bool valid = 2;
|
||||
bytes key = 3;
|
||||
bytes value = 4;
|
||||
}
|
||||
|
||||
message Stats {
|
||||
map<string, string> data = 1;
|
||||
int64 time_at = 2;
|
||||
}
|
||||
|
||||
message Init {
|
||||
string Type = 1;
|
||||
string Name = 2;
|
||||
string Dir = 3;
|
||||
}
|
||||
|
||||
service DB {
|
||||
rpc init(Init) returns (Entity) {}
|
||||
rpc get(Entity) returns (Entity) {}
|
||||
rpc getStream(stream Entity) returns (stream Entity) {}
|
||||
|
||||
rpc has(Entity) returns (Entity) {}
|
||||
rpc set(Entity) returns (Nothing) {}
|
||||
rpc setSync(Entity) returns (Nothing) {}
|
||||
rpc delete(Entity) returns (Nothing) {}
|
||||
rpc deleteSync(Entity) returns (Nothing) {}
|
||||
rpc iterator(Entity) returns (stream Iterator) {}
|
||||
rpc reverseIterator(Entity) returns (stream Iterator) {}
|
||||
// rpc print(Nothing) returns (Entity) {}
|
||||
rpc stats(Nothing) returns (Stats) {}
|
||||
rpc batchWrite(Batch) returns (Nothing) {}
|
||||
rpc batchWriteSync(Batch) returns (Nothing) {}
|
||||
}
|
|
@ -0,0 +1,262 @@
|
|||
package remotedb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/db/remotedb/grpcdb"
|
||||
protodb "github.com/tendermint/tmlibs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
type RemoteDB struct {
|
||||
ctx context.Context
|
||||
dc protodb.DBClient
|
||||
}
|
||||
|
||||
func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) {
|
||||
return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey))
|
||||
}
|
||||
|
||||
func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &RemoteDB{dc: gdc, ctx: context.Background()}, nil
|
||||
}
|
||||
|
||||
// Init carries the parameters used to create/open a database on the
// remote server; it mirrors protodb.Init.
type Init struct {
	Dir  string // directory the database lives in
	Name string // database name
	Type string // backend type, e.g. "leveldb"
}

// InitRemote asks the server to create/open the database described by in.
func (rd *RemoteDB) InitRemote(in *Init) error {
	_, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name})
	return err
}

// Compile-time check that RemoteDB satisfies the db.DB interface.
var _ db.DB = (*RemoteDB)(nil)
|
||||
|
||||
// Close is a noop currently
func (rd *RemoteDB) Close() {
}

// Delete removes key on the remote server. It panics on RPC failure,
// mirroring the error-free db.DB interface.
func (rd *RemoteDB) Delete(key []byte) {
	if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil {
		panic(fmt.Sprintf("RemoteDB.Delete: %v", err))
	}
}

// DeleteSync is like Delete but asks the server to flush the deletion
// before returning. Panics on RPC failure.
func (rd *RemoteDB) DeleteSync(key []byte) {
	if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil {
		panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err))
	}
}

// Set stores value under key on the remote server. Panics on RPC failure.
func (rd *RemoteDB) Set(key, value []byte) {
	if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
		panic(fmt.Sprintf("RemoteDB.Set: %v", err))
	}
}

// SetSync is like Set but asks the server to flush the write before
// returning. Panics on RPC failure.
func (rd *RemoteDB) SetSync(key, value []byte) {
	if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
		panic(fmt.Sprintf("RemoteDB.SetSync: %v", err))
	}
}
|
||||
|
||||
// Get fetches the value stored under key from the remote server.
// Panics on RPC failure, mirroring the error-free db.DB interface.
func (rd *RemoteDB) Get(key []byte) []byte {
	res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key})
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.Get error: %v", err))
	}
	return res.Value
}

// Has reports whether key exists on the remote server. Panics on RPC
// failure.
func (rd *RemoteDB) Has(key []byte) bool {
	res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key})
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.Has error: %v", err))
	}
	return res.Exists
}
|
||||
|
||||
func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator {
|
||||
dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
|
||||
}
|
||||
return makeReverseIterator(dic)
|
||||
}
|
||||
|
||||
// NewBatch returns an empty write batch whose operations are buffered
// locally and shipped to the server only on Write/WriteSync.
func (rd *RemoteDB) NewBatch() db.Batch {
	return &batch{
		db:  rd,
		ops: nil,
	}
}

// TODO: Implement Print when db.DB implements a method
// to print to a string and not db.Print to stdout.
func (rd *RemoteDB) Print() {
	panic("Unimplemented")
}

// Stats fetches backend statistics from the server as a string map.
// Panics on RPC failure; returns nil if the server sent no stats.
func (rd *RemoteDB) Stats() map[string]string {
	stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{})
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.Stats error: %v", err))
	}
	if stats == nil {
		return nil
	}
	return stats.Data
}

// Iterator opens a server-side forward iteration stream over the key
// range described by start/end and wraps it in a db.Iterator. Panics on
// RPC failure.
func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator {
	dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end})
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
	}
	return makeIterator(dic)
}
|
||||
|
||||
// makeIterator wraps a forward iteration stream in the db.Iterator shim.
func makeIterator(dic protodb.DB_IteratorClient) db.Iterator {
	return &iterator{dic: dic}
}

// makeReverseIterator wraps a reverse iteration stream in the
// db.Iterator shim.
func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator {
	return &reverseIterator{dric: dric}
}
|
||||
|
||||
type reverseIterator struct {
|
||||
dric protodb.DB_ReverseIteratorClient
|
||||
cur *protodb.Iterator
|
||||
}
|
||||
|
||||
var _ db.Iterator = (*iterator)(nil)
|
||||
|
||||
func (rItr *reverseIterator) Valid() bool {
|
||||
return rItr.cur != nil && rItr.cur.Valid
|
||||
}
|
||||
|
||||
// Domain returns the key range this iterator was opened over, or
// (nil, nil) if no position has been received from the server yet.
func (rItr *reverseIterator) Domain() (start, end []byte) {
	if rItr.cur == nil || rItr.cur.Domain == nil {
		return nil, nil
	}
	return rItr.cur.Domain.Start, rItr.cur.Domain.End
}

// Next advances the current reverseIterator by receiving the next
// position from the server stream; it panics on any stream error,
// including end-of-stream.
func (rItr *reverseIterator) Next() {
	var err error
	rItr.cur, err = rItr.dric.Recv()
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err))
	}
}

// Key returns the key at the current position, or nil before the first
// Next.
func (rItr *reverseIterator) Key() []byte {
	if rItr.cur == nil {
		return nil
	}
	return rItr.cur.Key
}

// Value returns the value at the current position, or nil before the
// first Next.
func (rItr *reverseIterator) Value() []byte {
	if rItr.cur == nil {
		return nil
	}
	return rItr.cur.Value
}
|
||||
|
||||
func (rItr *reverseIterator) Close() {
|
||||
}
|
||||
|
||||
// iterator implements the db.Iterator by retrieving
// streamed iterators from the remote backend as
// needed. It is NOT safe for concurrent usage,
// matching the behavior of other iterators.
type iterator struct {
	dic protodb.DB_IteratorClient // forward iteration stream
	cur *protodb.Iterator         // last position received; nil before first Next
}

// Compile-time check that iterator satisfies db.Iterator.
var _ db.Iterator = (*iterator)(nil)

// Valid reports whether the iterator currently points at a live entry.
func (itr *iterator) Valid() bool {
	return itr.cur != nil && itr.cur.Valid
}

// Domain returns the key range this iterator was opened over, or
// (nil, nil) if no position has been received from the server yet.
func (itr *iterator) Domain() (start, end []byte) {
	if itr.cur == nil || itr.cur.Domain == nil {
		return nil, nil
	}
	return itr.cur.Domain.Start, itr.cur.Domain.End
}
|
||||
|
||||
// Next advances the current iterator by receiving the next position
// from the server stream; it panics on any stream error, including
// end-of-stream (the tests rely on this via require.Panics).
func (itr *iterator) Next() {
	var err error
	itr.cur, err = itr.dic.Recv()
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err))
	}
}

// Key returns the key at the current position, or nil before the first
// Next.
func (itr *iterator) Key() []byte {
	if itr.cur == nil {
		return nil
	}
	return itr.cur.Key
}

// Value returns the value at the current position, or nil before the
// first Next.
func (itr *iterator) Value() []byte {
	if itr.cur == nil {
		return nil
	}
	return itr.cur.Value
}

// Close half-closes the iteration stream; panics if the close fails.
func (itr *iterator) Close() {
	err := itr.dic.CloseSend()
	if err != nil {
		panic(fmt.Sprintf("Error closing iterator: %v", err))
	}
}
|
||||
|
||||
// batch buffers set/delete operations locally and ships them to the
// server in a single BatchWrite(Sync) RPC.
type batch struct {
	db  *RemoteDB
	ops []*protodb.Operation // queued operations, in insertion order
}

// Compile-time check that batch satisfies db.Batch.
var _ db.Batch = (*batch)(nil)

// Set queues a key/value write; nothing is sent until Write/WriteSync.
func (bat *batch) Set(key, value []byte) {
	op := &protodb.Operation{
		Entity: &protodb.Entity{Key: key, Value: value},
		Type:   protodb.Operation_SET,
	}
	bat.ops = append(bat.ops, op)
}

// Delete queues a key deletion; nothing is sent until Write/WriteSync.
func (bat *batch) Delete(key []byte) {
	op := &protodb.Operation{
		Entity: &protodb.Entity{Key: key},
		Type:   protodb.Operation_DELETE,
	}
	bat.ops = append(bat.ops, op)
}

// Write ships the queued operations to the server. Panics on RPC
// failure.
func (bat *batch) Write() {
	if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil {
		panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err))
	}
}

// WriteSync is like Write but asks the server to flush before
// returning. Panics on RPC failure.
func (bat *batch) WriteSync() {
	if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil {
		panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err))
	}
}
|
|
@ -0,0 +1,123 @@
|
|||
package remotedb_test
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tmlibs/db/remotedb"
|
||||
"github.com/tendermint/tmlibs/db/remotedb/grpcdb"
|
||||
)
|
||||
|
||||
func TestRemoteDB(t *testing.T) {
|
||||
cert := "::.crt"
|
||||
key := "::.key"
|
||||
ln, err := net.Listen("tcp", "0.0.0.0:0")
|
||||
require.Nil(t, err, "expecting a port to have been assigned on which we can listen")
|
||||
srv, err := grpcdb.NewServer(cert, key)
|
||||
require.Nil(t, err)
|
||||
defer srv.Stop()
|
||||
go func() {
|
||||
if err := srv.Serve(ln); err != nil {
|
||||
t.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
|
||||
require.Nil(t, err, "expecting a successful client creation")
|
||||
dbName := "test-remote-db"
|
||||
require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"}))
|
||||
defer func() {
|
||||
err := os.RemoveAll(dbName + ".db")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
k1 := []byte("key-1")
|
||||
v1 := client.Get(k1)
|
||||
require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1)
|
||||
vv1 := []byte("value-1")
|
||||
client.Set(k1, vv1)
|
||||
gv1 := client.Get(k1)
|
||||
require.Equal(t, gv1, vv1)
|
||||
|
||||
// Simple iteration
|
||||
itr := client.Iterator(nil, nil)
|
||||
itr.Next()
|
||||
require.Equal(t, itr.Key(), []byte("key-1"))
|
||||
require.Equal(t, itr.Value(), []byte("value-1"))
|
||||
require.Panics(t, itr.Next)
|
||||
itr.Close()
|
||||
|
||||
// Set some more keys
|
||||
k2 := []byte("key-2")
|
||||
v2 := []byte("value-2")
|
||||
client.SetSync(k2, v2)
|
||||
has := client.Has(k2)
|
||||
require.True(t, has)
|
||||
gv2 := client.Get(k2)
|
||||
require.Equal(t, gv2, v2)
|
||||
|
||||
// More iteration
|
||||
itr = client.Iterator(nil, nil)
|
||||
itr.Next()
|
||||
require.Equal(t, itr.Key(), []byte("key-1"))
|
||||
require.Equal(t, itr.Value(), []byte("value-1"))
|
||||
itr.Next()
|
||||
require.Equal(t, itr.Key(), []byte("key-2"))
|
||||
require.Equal(t, itr.Value(), []byte("value-2"))
|
||||
require.Panics(t, itr.Next)
|
||||
itr.Close()
|
||||
|
||||
// Deletion
|
||||
client.Delete(k1)
|
||||
client.DeleteSync(k2)
|
||||
gv1 = client.Get(k1)
|
||||
gv2 = client.Get(k2)
|
||||
require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore")
|
||||
require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore")
|
||||
|
||||
// Batch tests - set
|
||||
k3 := []byte("key-3")
|
||||
k4 := []byte("key-4")
|
||||
k5 := []byte("key-5")
|
||||
v3 := []byte("value-3")
|
||||
v4 := []byte("value-4")
|
||||
v5 := []byte("value-5")
|
||||
bat := client.NewBatch()
|
||||
bat.Set(k3, v3)
|
||||
bat.Set(k4, v4)
|
||||
rv3 := client.Get(k3)
|
||||
require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored")
|
||||
rv4 := client.Get(k4)
|
||||
require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored")
|
||||
bat.Write()
|
||||
rv3 = client.Get(k3)
|
||||
require.Equal(t, rv3, v3, "expecting k3 to have been stored")
|
||||
rv4 = client.Get(k4)
|
||||
require.Equal(t, rv4, v4, "expecting k4 to have been stored")
|
||||
|
||||
// Batch tests - deletion
|
||||
bat = client.NewBatch()
|
||||
bat.Delete(k4)
|
||||
bat.Delete(k3)
|
||||
bat.WriteSync()
|
||||
rv3 = client.Get(k3)
|
||||
require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted")
|
||||
rv4 = client.Get(k4)
|
||||
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")
|
||||
|
||||
// Batch tests - set and delete
|
||||
bat = client.NewBatch()
|
||||
bat.Set(k4, v4)
|
||||
bat.Set(k5, v5)
|
||||
bat.Delete(k4)
|
||||
bat.WriteSync()
|
||||
rv4 = client.Get(k4)
|
||||
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")
|
||||
rv5 := client.Get(k5)
|
||||
require.Equal(t, rv5, v5, "expecting k5 to have been stored")
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
.PHONY: docs
|
||||
REPO:=github.com/tendermint/tmlibs/events
|
||||
|
||||
docs:
|
||||
@go get github.com/davecheney/godoc2md
|
||||
godoc2md $(REPO) > README.md
|
||||
|
||||
test:
|
||||
go test -v ./...
|
175
events/README.md
175
events/README.md
|
@ -1,175 +0,0 @@
|
|||
|
||||
|
||||
# events
|
||||
`import "github.com/tendermint/tmlibs/events"`
|
||||
|
||||
* [Overview](#pkg-overview)
|
||||
* [Index](#pkg-index)
|
||||
|
||||
## <a name="pkg-overview">Overview</a>
|
||||
Pub-Sub in go with event caching
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="pkg-index">Index</a>
|
||||
* [type EventCache](#EventCache)
|
||||
* [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache)
|
||||
* [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent)
|
||||
* [func (evc *EventCache) Flush()](#EventCache.Flush)
|
||||
* [type EventCallback](#EventCallback)
|
||||
* [type EventData](#EventData)
|
||||
* [type EventSwitch](#EventSwitch)
|
||||
* [func NewEventSwitch() EventSwitch](#NewEventSwitch)
|
||||
* [type Eventable](#Eventable)
|
||||
* [type Fireable](#Fireable)
|
||||
|
||||
|
||||
#### <a name="pkg-files">Package files</a>
|
||||
[event_cache.go](/src/github.com/tendermint/tmlibs/events/event_cache.go) [events.go](/src/github.com/tendermint/tmlibs/events/events.go) [log.go](/src/github.com/tendermint/tmlibs/events/log.go)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="EventCache">type</a> [EventCache](/src/target/event_cache.go?s=152:215#L1)
|
||||
``` go
|
||||
type EventCache struct {
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
An EventCache buffers events for a Fireable
|
||||
All events are cached. Filtering happens on Flush
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### <a name="NewEventCache">func</a> [NewEventCache](/src/target/event_cache.go?s=275:320#L5)
|
||||
``` go
|
||||
func NewEventCache(evsw Fireable) *EventCache
|
||||
```
|
||||
Create a new EventCache with an EventSwitch as backend
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### <a name="EventCache.FireEvent">func</a> (\*EventCache) [FireEvent](/src/target/event_cache.go?s=534:596#L19)
|
||||
``` go
|
||||
func (evc *EventCache) FireEvent(event string, data EventData)
|
||||
```
|
||||
Cache an event to be fired upon finality.
|
||||
|
||||
|
||||
|
||||
|
||||
### <a name="EventCache.Flush">func</a> (\*EventCache) [Flush](/src/target/event_cache.go?s=773:803#L26)
|
||||
``` go
|
||||
func (evc *EventCache) Flush()
|
||||
```
|
||||
Fire events by running evsw.FireEvent on all cached events. Blocks.
|
||||
Clears cached events
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="EventCallback">type</a> [EventCallback](/src/target/events.go?s=4182:4221#L175)
|
||||
``` go
|
||||
type EventCallback func(data EventData)
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="EventData">type</a> [EventData](/src/target/events.go?s=236:287#L4)
|
||||
``` go
|
||||
type EventData interface {
|
||||
}
|
||||
```
|
||||
Generic event data can be typed and registered with tendermint/go-amino
|
||||
via concrete implementation of this interface
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="EventSwitch">type</a> [EventSwitch](/src/target/events.go?s=553:760#L19)
|
||||
``` go
|
||||
type EventSwitch interface {
|
||||
Service
|
||||
Fireable
|
||||
|
||||
AddListenerForEvent(listenerID, event string, cb EventCallback)
|
||||
RemoveListenerForEvent(event string, listenerID string)
|
||||
RemoveListener(listenerID string)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### <a name="NewEventSwitch">func</a> [NewEventSwitch](/src/target/events.go?s=902:935#L36)
|
||||
``` go
|
||||
func NewEventSwitch() EventSwitch
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="Eventable">type</a> [Eventable](/src/target/events.go?s=371:433#L10)
|
||||
``` go
|
||||
type Eventable interface {
|
||||
SetEventSwitch(evsw EventSwitch)
|
||||
}
|
||||
```
|
||||
reactors and other modules should export
|
||||
this interface to become eventable
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## <a name="Fireable">type</a> [Fireable](/src/target/events.go?s=483:551#L15)
|
||||
``` go
|
||||
type Fireable interface {
|
||||
FireEvent(event string, data EventData)
|
||||
}
|
||||
```
|
||||
an event switch or cache implements fireable
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- - -
|
||||
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
|
|
@ -1,37 +0,0 @@
|
|||
package events
|
||||
|
||||
// An EventCache buffers events for a Fireable
|
||||
// All events are cached. Filtering happens on Flush
|
||||
type EventCache struct {
|
||||
evsw Fireable
|
||||
events []eventInfo
|
||||
}
|
||||
|
||||
// Create a new EventCache with an EventSwitch as backend
|
||||
func NewEventCache(evsw Fireable) *EventCache {
|
||||
return &EventCache{
|
||||
evsw: evsw,
|
||||
}
|
||||
}
|
||||
|
||||
// a cached event
|
||||
type eventInfo struct {
|
||||
event string
|
||||
data EventData
|
||||
}
|
||||
|
||||
// Cache an event to be fired upon finality.
|
||||
func (evc *EventCache) FireEvent(event string, data EventData) {
|
||||
// append to list (go will grow our backing array exponentially)
|
||||
evc.events = append(evc.events, eventInfo{event, data})
|
||||
}
|
||||
|
||||
// Fire events by running evsw.FireEvent on all cached events. Blocks.
|
||||
// Clears cached events
|
||||
func (evc *EventCache) Flush() {
|
||||
for _, ei := range evc.events {
|
||||
evc.evsw.FireEvent(ei.event, ei.data)
|
||||
}
|
||||
// Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation
|
||||
evc.events = nil
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package events
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestEventCache_Flush(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
evsw.Start()
|
||||
evsw.AddListenerForEvent("nothingness", "", func(data EventData) {
|
||||
// Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache
|
||||
require.FailNow(t, "We should never receive a message on this switch since none are fired")
|
||||
})
|
||||
evc := NewEventCache(evsw)
|
||||
evc.Flush()
|
||||
// Check after reset
|
||||
evc.Flush()
|
||||
fail := true
|
||||
pass := false
|
||||
evsw.AddListenerForEvent("somethingness", "something", func(data EventData) {
|
||||
if fail {
|
||||
require.FailNow(t, "Shouldn't see a message until flushed")
|
||||
}
|
||||
pass = true
|
||||
})
|
||||
evc.FireEvent("something", struct{ int }{1})
|
||||
evc.FireEvent("something", struct{ int }{2})
|
||||
evc.FireEvent("something", struct{ int }{3})
|
||||
fail = false
|
||||
evc.Flush()
|
||||
assert.True(t, pass)
|
||||
}
|
226
events/events.go
226
events/events.go
|
@ -1,226 +0,0 @@
|
|||
/*
|
||||
Pub-Sub in go with event caching
|
||||
*/
|
||||
package events
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
// Generic event data can be typed and registered with tendermint/go-amino
|
||||
// via concrete implementation of this interface
|
||||
type EventData interface {
|
||||
//AssertIsEventData()
|
||||
}
|
||||
|
||||
// reactors and other modules should export
|
||||
// this interface to become eventable
|
||||
type Eventable interface {
|
||||
SetEventSwitch(evsw EventSwitch)
|
||||
}
|
||||
|
||||
// an event switch or cache implements fireable
|
||||
type Fireable interface {
|
||||
FireEvent(event string, data EventData)
|
||||
}
|
||||
|
||||
type EventSwitch interface {
|
||||
cmn.Service
|
||||
Fireable
|
||||
|
||||
AddListenerForEvent(listenerID, event string, cb EventCallback)
|
||||
RemoveListenerForEvent(event string, listenerID string)
|
||||
RemoveListener(listenerID string)
|
||||
}
|
||||
|
||||
type eventSwitch struct {
|
||||
cmn.BaseService
|
||||
|
||||
mtx sync.RWMutex
|
||||
eventCells map[string]*eventCell
|
||||
listeners map[string]*eventListener
|
||||
}
|
||||
|
||||
func NewEventSwitch() EventSwitch {
|
||||
evsw := &eventSwitch{}
|
||||
evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw)
|
||||
return evsw
|
||||
}
|
||||
|
||||
func (evsw *eventSwitch) OnStart() error {
|
||||
evsw.BaseService.OnStart()
|
||||
evsw.eventCells = make(map[string]*eventCell)
|
||||
evsw.listeners = make(map[string]*eventListener)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (evsw *eventSwitch) OnStop() {
|
||||
evsw.mtx.Lock()
|
||||
defer evsw.mtx.Unlock()
|
||||
evsw.BaseService.OnStop()
|
||||
evsw.eventCells = nil
|
||||
evsw.listeners = nil
|
||||
}
|
||||
|
||||
func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) {
|
||||
// Get/Create eventCell and listener
|
||||
evsw.mtx.Lock()
|
||||
eventCell := evsw.eventCells[event]
|
||||
if eventCell == nil {
|
||||
eventCell = newEventCell()
|
||||
evsw.eventCells[event] = eventCell
|
||||
}
|
||||
listener := evsw.listeners[listenerID]
|
||||
if listener == nil {
|
||||
listener = newEventListener(listenerID)
|
||||
evsw.listeners[listenerID] = listener
|
||||
}
|
||||
evsw.mtx.Unlock()
|
||||
|
||||
// Add event and listener
|
||||
eventCell.AddListener(listenerID, cb)
|
||||
listener.AddEvent(event)
|
||||
}
|
||||
|
||||
func (evsw *eventSwitch) RemoveListener(listenerID string) {
|
||||
// Get and remove listener
|
||||
evsw.mtx.RLock()
|
||||
listener := evsw.listeners[listenerID]
|
||||
evsw.mtx.RUnlock()
|
||||
if listener == nil {
|
||||
return
|
||||
}
|
||||
|
||||
evsw.mtx.Lock()
|
||||
delete(evsw.listeners, listenerID)
|
||||
evsw.mtx.Unlock()
|
||||
|
||||
// Remove callback for each event.
|
||||
listener.SetRemoved()
|
||||
for _, event := range listener.GetEvents() {
|
||||
evsw.RemoveListenerForEvent(event, listenerID)
|
||||
}
|
||||
}
|
||||
|
||||
func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) {
|
||||
// Get eventCell
|
||||
evsw.mtx.Lock()
|
||||
eventCell := evsw.eventCells[event]
|
||||
evsw.mtx.Unlock()
|
||||
|
||||
if eventCell == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Remove listenerID from eventCell
|
||||
numListeners := eventCell.RemoveListener(listenerID)
|
||||
|
||||
// Maybe garbage collect eventCell.
|
||||
if numListeners == 0 {
|
||||
// Lock again and double check.
|
||||
evsw.mtx.Lock() // OUTER LOCK
|
||||
eventCell.mtx.Lock() // INNER LOCK
|
||||
if len(eventCell.listeners) == 0 {
|
||||
delete(evsw.eventCells, event)
|
||||
}
|
||||
eventCell.mtx.Unlock() // INNER LOCK
|
||||
evsw.mtx.Unlock() // OUTER LOCK
|
||||
}
|
||||
}
|
||||
|
||||
func (evsw *eventSwitch) FireEvent(event string, data EventData) {
|
||||
// Get the eventCell
|
||||
evsw.mtx.RLock()
|
||||
eventCell := evsw.eventCells[event]
|
||||
evsw.mtx.RUnlock()
|
||||
|
||||
if eventCell == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Fire event for all listeners in eventCell
|
||||
eventCell.FireEvent(data)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// eventCell handles keeping track of listener callbacks for a given event.
|
||||
type eventCell struct {
|
||||
mtx sync.RWMutex
|
||||
listeners map[string]EventCallback
|
||||
}
|
||||
|
||||
func newEventCell() *eventCell {
|
||||
return &eventCell{
|
||||
listeners: make(map[string]EventCallback),
|
||||
}
|
||||
}
|
||||
|
||||
func (cell *eventCell) AddListener(listenerID string, cb EventCallback) {
|
||||
cell.mtx.Lock()
|
||||
cell.listeners[listenerID] = cb
|
||||
cell.mtx.Unlock()
|
||||
}
|
||||
|
||||
func (cell *eventCell) RemoveListener(listenerID string) int {
|
||||
cell.mtx.Lock()
|
||||
delete(cell.listeners, listenerID)
|
||||
numListeners := len(cell.listeners)
|
||||
cell.mtx.Unlock()
|
||||
return numListeners
|
||||
}
|
||||
|
||||
func (cell *eventCell) FireEvent(data EventData) {
|
||||
cell.mtx.RLock()
|
||||
for _, listener := range cell.listeners {
|
||||
listener(data)
|
||||
}
|
||||
cell.mtx.RUnlock()
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
type EventCallback func(data EventData)
|
||||
|
||||
type eventListener struct {
|
||||
id string
|
||||
|
||||
mtx sync.RWMutex
|
||||
removed bool
|
||||
events []string
|
||||
}
|
||||
|
||||
func newEventListener(id string) *eventListener {
|
||||
return &eventListener{
|
||||
id: id,
|
||||
removed: false,
|
||||
events: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (evl *eventListener) AddEvent(event string) {
|
||||
evl.mtx.Lock()
|
||||
defer evl.mtx.Unlock()
|
||||
|
||||
if evl.removed {
|
||||
return
|
||||
}
|
||||
evl.events = append(evl.events, event)
|
||||
}
|
||||
|
||||
func (evl *eventListener) GetEvents() []string {
|
||||
evl.mtx.RLock()
|
||||
defer evl.mtx.RUnlock()
|
||||
|
||||
events := make([]string, len(evl.events))
|
||||
copy(events, evl.events)
|
||||
return events
|
||||
}
|
||||
|
||||
func (evl *eventListener) SetRemoved() {
|
||||
evl.mtx.Lock()
|
||||
defer evl.mtx.Unlock()
|
||||
evl.removed = true
|
||||
}
|
|
@ -1,380 +0,0 @@
|
|||
package events
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single
|
||||
// listener to an event, and sends a string "data".
|
||||
func TestAddListenerForEventFireOnce(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
messages := make(chan EventData)
|
||||
evsw.AddListenerForEvent("listener", "event",
|
||||
func(data EventData) {
|
||||
messages <- data
|
||||
})
|
||||
go evsw.FireEvent("event", "data")
|
||||
received := <-messages
|
||||
if received != "data" {
|
||||
t.Errorf("Message received does not match: %v", received)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single
|
||||
// listener to an event, and sends a thousand integers.
|
||||
func TestAddListenerForEventFireMany(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
doneSum := make(chan uint64)
|
||||
doneSending := make(chan uint64)
|
||||
numbers := make(chan uint64, 4)
|
||||
// subscribe one listener for one event
|
||||
evsw.AddListenerForEvent("listener", "event",
|
||||
func(data EventData) {
|
||||
numbers <- data.(uint64)
|
||||
})
|
||||
// collect received events
|
||||
go sumReceivedNumbers(numbers, doneSum)
|
||||
// go fire events
|
||||
go fireEvents(evsw, "event", doneSending, uint64(1))
|
||||
checkSum := <-doneSending
|
||||
close(numbers)
|
||||
eventSum := <-doneSum
|
||||
if checkSum != eventSum {
|
||||
t.Errorf("Not all messages sent were received.\n")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single
|
||||
// listener to three different events and sends a thousand integers for each
|
||||
// of the three events.
|
||||
func TestAddListenerForDifferentEvents(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
doneSum := make(chan uint64)
|
||||
doneSending1 := make(chan uint64)
|
||||
doneSending2 := make(chan uint64)
|
||||
doneSending3 := make(chan uint64)
|
||||
numbers := make(chan uint64, 4)
|
||||
// subscribe one listener to three events
|
||||
evsw.AddListenerForEvent("listener", "event1",
|
||||
func(data EventData) {
|
||||
numbers <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener", "event2",
|
||||
func(data EventData) {
|
||||
numbers <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener", "event3",
|
||||
func(data EventData) {
|
||||
numbers <- data.(uint64)
|
||||
})
|
||||
// collect received events
|
||||
go sumReceivedNumbers(numbers, doneSum)
|
||||
// go fire events
|
||||
go fireEvents(evsw, "event1", doneSending1, uint64(1))
|
||||
go fireEvents(evsw, "event2", doneSending2, uint64(1))
|
||||
go fireEvents(evsw, "event3", doneSending3, uint64(1))
|
||||
var checkSum uint64 = 0
|
||||
checkSum += <-doneSending1
|
||||
checkSum += <-doneSending2
|
||||
checkSum += <-doneSending3
|
||||
close(numbers)
|
||||
eventSum := <-doneSum
|
||||
if checkSum != eventSum {
|
||||
t.Errorf("Not all messages sent were received.\n")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch,
|
||||
// subscribes a first listener to three events, and subscribes a second
|
||||
// listener to two of those three events, and then sends a thousand integers
|
||||
// for each of the three events.
|
||||
func TestAddDifferentListenerForDifferentEvents(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
doneSum1 := make(chan uint64)
|
||||
doneSum2 := make(chan uint64)
|
||||
doneSending1 := make(chan uint64)
|
||||
doneSending2 := make(chan uint64)
|
||||
doneSending3 := make(chan uint64)
|
||||
numbers1 := make(chan uint64, 4)
|
||||
numbers2 := make(chan uint64, 4)
|
||||
// subscribe two listener to three events
|
||||
evsw.AddListenerForEvent("listener1", "event1",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener1", "event2",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener1", "event3",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener2", "event2",
|
||||
func(data EventData) {
|
||||
numbers2 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener2", "event3",
|
||||
func(data EventData) {
|
||||
numbers2 <- data.(uint64)
|
||||
})
|
||||
// collect received events for listener1
|
||||
go sumReceivedNumbers(numbers1, doneSum1)
|
||||
// collect received events for listener2
|
||||
go sumReceivedNumbers(numbers2, doneSum2)
|
||||
// go fire events
|
||||
go fireEvents(evsw, "event1", doneSending1, uint64(1))
|
||||
go fireEvents(evsw, "event2", doneSending2, uint64(1001))
|
||||
go fireEvents(evsw, "event3", doneSending3, uint64(2001))
|
||||
checkSumEvent1 := <-doneSending1
|
||||
checkSumEvent2 := <-doneSending2
|
||||
checkSumEvent3 := <-doneSending3
|
||||
checkSum1 := checkSumEvent1 + checkSumEvent2 + checkSumEvent3
|
||||
checkSum2 := checkSumEvent2 + checkSumEvent3
|
||||
close(numbers1)
|
||||
close(numbers2)
|
||||
eventSum1 := <-doneSum1
|
||||
eventSum2 := <-doneSum2
|
||||
if checkSum1 != eventSum1 ||
|
||||
checkSum2 != eventSum2 {
|
||||
t.Errorf("Not all messages sent were received for different listeners to different events.\n")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to
|
||||
// two events, fires a thousand integers for the first event, then unsubscribes
|
||||
// the listener and fires a thousand integers for the second event.
|
||||
func TestAddAndRemoveListener(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
doneSum1 := make(chan uint64)
|
||||
doneSum2 := make(chan uint64)
|
||||
doneSending1 := make(chan uint64)
|
||||
doneSending2 := make(chan uint64)
|
||||
numbers1 := make(chan uint64, 4)
|
||||
numbers2 := make(chan uint64, 4)
|
||||
// subscribe two listener to three events
|
||||
evsw.AddListenerForEvent("listener", "event1",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener", "event2",
|
||||
func(data EventData) {
|
||||
numbers2 <- data.(uint64)
|
||||
})
|
||||
// collect received events for event1
|
||||
go sumReceivedNumbers(numbers1, doneSum1)
|
||||
// collect received events for event2
|
||||
go sumReceivedNumbers(numbers2, doneSum2)
|
||||
// go fire events
|
||||
go fireEvents(evsw, "event1", doneSending1, uint64(1))
|
||||
checkSumEvent1 := <-doneSending1
|
||||
// after sending all event1, unsubscribe for all events
|
||||
evsw.RemoveListener("listener")
|
||||
go fireEvents(evsw, "event2", doneSending2, uint64(1001))
|
||||
checkSumEvent2 := <-doneSending2
|
||||
close(numbers1)
|
||||
close(numbers2)
|
||||
eventSum1 := <-doneSum1
|
||||
eventSum2 := <-doneSum2
|
||||
if checkSumEvent1 != eventSum1 ||
|
||||
// correct value asserted by preceding tests, suffices to be non-zero
|
||||
checkSumEvent2 == uint64(0) ||
|
||||
eventSum2 != uint64(0) {
|
||||
t.Errorf("Not all messages sent were received or unsubscription did not register.\n")
|
||||
}
|
||||
}
|
||||
|
||||
// TestRemoveListener does basic tests on adding and removing
|
||||
func TestRemoveListener(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
count := 10
|
||||
sum1, sum2 := 0, 0
|
||||
// add some listeners and make sure they work
|
||||
evsw.AddListenerForEvent("listener", "event1",
|
||||
func(data EventData) {
|
||||
sum1++
|
||||
})
|
||||
evsw.AddListenerForEvent("listener", "event2",
|
||||
func(data EventData) {
|
||||
sum2++
|
||||
})
|
||||
for i := 0; i < count; i++ {
|
||||
evsw.FireEvent("event1", true)
|
||||
evsw.FireEvent("event2", true)
|
||||
}
|
||||
assert.Equal(t, count, sum1)
|
||||
assert.Equal(t, count, sum2)
|
||||
|
||||
// remove one by event and make sure it is gone
|
||||
evsw.RemoveListenerForEvent("event2", "listener")
|
||||
for i := 0; i < count; i++ {
|
||||
evsw.FireEvent("event1", true)
|
||||
evsw.FireEvent("event2", true)
|
||||
}
|
||||
assert.Equal(t, count*2, sum1)
|
||||
assert.Equal(t, count, sum2)
|
||||
|
||||
// remove the listener entirely and make sure both gone
|
||||
evsw.RemoveListener("listener")
|
||||
for i := 0; i < count; i++ {
|
||||
evsw.FireEvent("event1", true)
|
||||
evsw.FireEvent("event2", true)
|
||||
}
|
||||
assert.Equal(t, count*2, sum1)
|
||||
assert.Equal(t, count, sum2)
|
||||
}
|
||||
|
||||
// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two
|
||||
// listeners to three events, and fires a thousand integers for each event.
|
||||
// These two listeners serve as the baseline validation while other listeners
|
||||
// are randomly subscribed and unsubscribed.
|
||||
// More precisely it randomly subscribes new listeners (different from the first
|
||||
// two listeners) to one of these three events. At the same time it starts
|
||||
// randomly unsubscribing these additional listeners from all events they are
|
||||
// at that point subscribed to.
|
||||
// NOTE: it is important to run this test with race conditions tracking on,
|
||||
// `go test -race`, to examine for possible race conditions.
|
||||
func TestRemoveListenersAsync(t *testing.T) {
|
||||
evsw := NewEventSwitch()
|
||||
err := evsw.Start()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to start EventSwitch, error: %v", err)
|
||||
}
|
||||
doneSum1 := make(chan uint64)
|
||||
doneSum2 := make(chan uint64)
|
||||
doneSending1 := make(chan uint64)
|
||||
doneSending2 := make(chan uint64)
|
||||
doneSending3 := make(chan uint64)
|
||||
numbers1 := make(chan uint64, 4)
|
||||
numbers2 := make(chan uint64, 4)
|
||||
// subscribe two listener to three events
|
||||
evsw.AddListenerForEvent("listener1", "event1",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener1", "event2",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener1", "event3",
|
||||
func(data EventData) {
|
||||
numbers1 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener2", "event1",
|
||||
func(data EventData) {
|
||||
numbers2 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener2", "event2",
|
||||
func(data EventData) {
|
||||
numbers2 <- data.(uint64)
|
||||
})
|
||||
evsw.AddListenerForEvent("listener2", "event3",
|
||||
func(data EventData) {
|
||||
numbers2 <- data.(uint64)
|
||||
})
|
||||
// collect received events for event1
|
||||
go sumReceivedNumbers(numbers1, doneSum1)
|
||||
// collect received events for event2
|
||||
go sumReceivedNumbers(numbers2, doneSum2)
|
||||
addListenersStress := func() {
|
||||
s1 := rand.NewSource(time.Now().UnixNano())
|
||||
r1 := rand.New(s1)
|
||||
for k := uint16(0); k < 400; k++ {
|
||||
listenerNumber := r1.Intn(100) + 3
|
||||
eventNumber := r1.Intn(3) + 1
|
||||
go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber),
|
||||
fmt.Sprintf("event%v", eventNumber),
|
||||
func(_ EventData) {})
|
||||
}
|
||||
}
|
||||
removeListenersStress := func() {
|
||||
s2 := rand.NewSource(time.Now().UnixNano())
|
||||
r2 := rand.New(s2)
|
||||
for k := uint16(0); k < 80; k++ {
|
||||
listenerNumber := r2.Intn(100) + 3
|
||||
go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber))
|
||||
}
|
||||
}
|
||||
addListenersStress()
|
||||
// go fire events
|
||||
go fireEvents(evsw, "event1", doneSending1, uint64(1))
|
||||
removeListenersStress()
|
||||
go fireEvents(evsw, "event2", doneSending2, uint64(1001))
|
||||
go fireEvents(evsw, "event3", doneSending3, uint64(2001))
|
||||
checkSumEvent1 := <-doneSending1
|
||||
checkSumEvent2 := <-doneSending2
|
||||
checkSumEvent3 := <-doneSending3
|
||||
checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3
|
||||
close(numbers1)
|
||||
close(numbers2)
|
||||
eventSum1 := <-doneSum1
|
||||
eventSum2 := <-doneSum2
|
||||
if checkSum != eventSum1 ||
|
||||
checkSum != eventSum2 {
|
||||
t.Errorf("Not all messages sent were received.\n")
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Helper functions
|
||||
|
||||
// sumReceivedNumbers takes two channels and adds all numbers received
|
||||
// until the receiving channel `numbers` is closed; it then sends the sum
|
||||
// on `doneSum` and closes that channel. Expected to be run in a go-routine.
|
||||
func sumReceivedNumbers(numbers, doneSum chan uint64) {
|
||||
var sum uint64 = 0
|
||||
for {
|
||||
j, more := <-numbers
|
||||
sum += j
|
||||
if !more {
|
||||
doneSum <- sum
|
||||
close(doneSum)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fireEvents takes an EventSwitch and fires a thousand integers under
|
||||
// a given `event` with the integers mootonically increasing from `offset`
|
||||
// to `offset` + 999. It additionally returns the addition of all integers
|
||||
// sent on `doneChan` for assertion that all events have been sent, and enabling
|
||||
// the test to assert all events have also been received.
|
||||
func fireEvents(evsw EventSwitch, event string, doneChan chan uint64,
|
||||
offset uint64) {
|
||||
var sentSum uint64 = 0
|
||||
for i := offset; i <= offset+uint64(999); i++ {
|
||||
sentSum += i
|
||||
evsw.FireEvent(event, i)
|
||||
}
|
||||
doneChan <- sentSum
|
||||
close(doneChan)
|
||||
}
|
|
@ -2,7 +2,7 @@ package merkle
|
|||
|
||||
import (
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
"github.com/tendermint/tmlibs/merkle/tmhash"
|
||||
)
|
||||
|
||||
type SimpleMap struct {
|
||||
|
@ -63,7 +63,7 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs {
|
|||
type KVPair cmn.KVPair
|
||||
|
||||
func (kv KVPair) Hash() []byte {
|
||||
hasher := ripemd160.New()
|
||||
hasher := tmhash.New()
|
||||
err := encodeByteSlice(hasher, kv.Key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
|
@ -17,37 +17,37 @@ func TestSimpleMap(t *testing.T) {
|
|||
{
|
||||
db := NewSimpleMap()
|
||||
db.Set("key1", strHasher("value1"))
|
||||
assert.Equal(t, "acdb4f121bc6f25041eb263ab463f1cd79236a32", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
assert.Equal(t, "3dafc06a52039d029be57c75c9d16356a4256ef4", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
}
|
||||
{
|
||||
db := NewSimpleMap()
|
||||
db.Set("key1", strHasher("value2"))
|
||||
assert.Equal(t, "b8cbf5adee8c524e14f531da9b49adbbbd66fffa", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
assert.Equal(t, "03eb5cfdff646bc4e80fec844e72fd248a1c6b2c", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
}
|
||||
{
|
||||
db := NewSimpleMap()
|
||||
db.Set("key1", strHasher("value1"))
|
||||
db.Set("key2", strHasher("value2"))
|
||||
assert.Equal(t, "1708aabc85bbe00242d3db8c299516aa54e48c38", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
}
|
||||
{
|
||||
db := NewSimpleMap()
|
||||
db.Set("key2", strHasher("value2")) // NOTE: out of order
|
||||
db.Set("key1", strHasher("value1"))
|
||||
assert.Equal(t, "1708aabc85bbe00242d3db8c299516aa54e48c38", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
}
|
||||
{
|
||||
db := NewSimpleMap()
|
||||
db.Set("key1", strHasher("value1"))
|
||||
db.Set("key2", strHasher("value2"))
|
||||
db.Set("key3", strHasher("value3"))
|
||||
assert.Equal(t, "e728afe72ce351eed6aca65c5f78da19b9a6e214", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
}
|
||||
{
|
||||
db := NewSimpleMap()
|
||||
db.Set("key2", strHasher("value2")) // NOTE: out of order
|
||||
db.Set("key1", strHasher("value1"))
|
||||
db.Set("key3", strHasher("value3"))
|
||||
assert.Equal(t, "e728afe72ce351eed6aca65c5f78da19b9a6e214", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,11 +25,11 @@ For larger datasets, use IAVLTree.
|
|||
package merkle
|
||||
|
||||
import (
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
"github.com/tendermint/tmlibs/merkle/tmhash"
|
||||
)
|
||||
|
||||
func SimpleHashFromTwoHashes(left []byte, right []byte) []byte {
|
||||
var hasher = ripemd160.New()
|
||||
var hasher = tmhash.New()
|
||||
err := encodeByteSlice(hasher, left)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -68,7 +68,7 @@ func SimpleHashFromByteslices(bzs [][]byte) []byte {
|
|||
}
|
||||
|
||||
func SimpleHashFromBytes(bz []byte) []byte {
|
||||
hasher := ripemd160.New()
|
||||
hasher := tmhash.New()
|
||||
hasher.Write(bz)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
package tmhash
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"hash"
|
||||
)
|
||||
|
||||
var (
|
||||
Size = 20
|
||||
BlockSize = sha256.BlockSize
|
||||
)
|
||||
|
||||
type sha256trunc struct {
|
||||
sha256 hash.Hash
|
||||
}
|
||||
|
||||
func (h sha256trunc) Write(p []byte) (n int, err error) {
|
||||
return h.sha256.Write(p)
|
||||
}
|
||||
func (h sha256trunc) Sum(b []byte) []byte {
|
||||
shasum := h.sha256.Sum(b)
|
||||
return shasum[:Size]
|
||||
}
|
||||
|
||||
func (h sha256trunc) Reset() {
|
||||
h.sha256.Reset()
|
||||
}
|
||||
|
||||
func (h sha256trunc) Size() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
func (h sha256trunc) BlockSize() int {
|
||||
return h.sha256.BlockSize()
|
||||
}
|
||||
|
||||
func New() hash.Hash {
|
||||
return sha256trunc{
|
||||
sha256: sha256.New(),
|
||||
}
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
package tmhash_test
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/tendermint/tmlibs/merkle/tmhash"
|
||||
)
|
||||
|
||||
func TestHash(t *testing.T) {
|
||||
testVector := []byte("abc")
|
||||
hasher := tmhash.New()
|
||||
hasher.Write(testVector)
|
||||
bz := hasher.Sum(nil)
|
||||
|
||||
hasher = sha256.New()
|
||||
hasher.Write(testVector)
|
||||
bz2 := hasher.Sum(nil)
|
||||
bz2 = bz2[:20]
|
||||
|
||||
assert.Equal(t, bz, bz2)
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package pubsub_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
"github.com/tendermint/tmlibs/pubsub"
|
||||
"github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
func TestExample(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
ch := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]interface{}{"abci.account.name": "John"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Tombstone", ch)
|
||||
}
|
342
pubsub/pubsub.go
342
pubsub/pubsub.go
|
@ -1,342 +0,0 @@
|
|||
// Package pubsub implements a pub-sub model with a single publisher (Server)
|
||||
// and multiple subscribers (clients).
|
||||
//
|
||||
// Though you can have multiple publishers by sharing a pointer to a server or
|
||||
// by giving the same channel to each publisher and publishing messages from
|
||||
// that channel (fan-in).
|
||||
//
|
||||
// Clients subscribe for messages, which could be of any type, using a query.
|
||||
// When some message is published, we match it with all queries. If there is a
|
||||
// match, this message will be pushed to all clients, subscribed to that query.
|
||||
// See query subpackage for our implementation.
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
type operation int
|
||||
|
||||
const (
|
||||
sub operation = iota
|
||||
pub
|
||||
unsub
|
||||
shutdown
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrSubscriptionNotFound is returned when a client tries to unsubscribe
|
||||
// from not existing subscription.
|
||||
ErrSubscriptionNotFound = errors.New("subscription not found")
|
||||
|
||||
// ErrAlreadySubscribed is returned when a client tries to subscribe twice or
|
||||
// more using the same query.
|
||||
ErrAlreadySubscribed = errors.New("already subscribed")
|
||||
)
|
||||
|
||||
// TagMap is used to associate tags to a message.
|
||||
// They can be queried by subscribers to choose messages they will received.
|
||||
type TagMap interface {
|
||||
// Get returns the value for a key, or nil if no value is present.
|
||||
// The ok result indicates whether value was found in the tags.
|
||||
Get(key string) (value interface{}, ok bool)
|
||||
// Len returns the number of tags.
|
||||
Len() int
|
||||
}
|
||||
|
||||
type tagMap map[string]interface{}
|
||||
|
||||
type cmd struct {
|
||||
op operation
|
||||
query Query
|
||||
ch chan<- interface{}
|
||||
clientID string
|
||||
msg interface{}
|
||||
tags TagMap
|
||||
}
|
||||
|
||||
// Query defines an interface for a query to be used for subscribing.
|
||||
type Query interface {
|
||||
Matches(tags TagMap) bool
|
||||
String() string
|
||||
}
|
||||
|
||||
// Server allows clients to subscribe/unsubscribe for messages, publishing
|
||||
// messages with or without tags, and manages internal state.
|
||||
type Server struct {
|
||||
cmn.BaseService
|
||||
|
||||
cmds chan cmd
|
||||
cmdsCap int
|
||||
|
||||
mtx sync.RWMutex
|
||||
subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query
|
||||
}
|
||||
|
||||
// Option sets a parameter for the server.
|
||||
type Option func(*Server)
|
||||
|
||||
// NewTagMap constructs a new immutable tag set from a map.
|
||||
func NewTagMap(data map[string]interface{}) TagMap {
|
||||
return tagMap(data)
|
||||
}
|
||||
|
||||
// Get returns the value for a key, or nil if no value is present.
|
||||
// The ok result indicates whether value was found in the tags.
|
||||
func (ts tagMap) Get(key string) (value interface{}, ok bool) {
|
||||
value, ok = ts[key]
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of tags.
|
||||
func (ts tagMap) Len() int {
|
||||
return len(ts)
|
||||
}
|
||||
|
||||
// NewServer returns a new server. See the commentary on the Option functions
|
||||
// for a detailed description of how to configure buffering. If no options are
|
||||
// provided, the resulting server's queue is unbuffered.
|
||||
func NewServer(options ...Option) *Server {
|
||||
s := &Server{
|
||||
subscriptions: make(map[string]map[string]Query),
|
||||
}
|
||||
s.BaseService = *cmn.NewBaseService(nil, "PubSub", s)
|
||||
|
||||
for _, option := range options {
|
||||
option(s)
|
||||
}
|
||||
|
||||
// if BufferCapacity option was not set, the channel is unbuffered
|
||||
s.cmds = make(chan cmd, s.cmdsCap)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// BufferCapacity allows you to specify capacity for the internal server's
|
||||
// queue. Since the server, given Y subscribers, could only process X messages,
|
||||
// this option could be used to survive spikes (e.g. high amount of
|
||||
// transactions during peak hours).
|
||||
func BufferCapacity(cap int) Option {
|
||||
return func(s *Server) {
|
||||
if cap > 0 {
|
||||
s.cmdsCap = cap
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BufferCapacity returns capacity of the internal server's queue.
|
||||
func (s *Server) BufferCapacity() int {
|
||||
return s.cmdsCap
|
||||
}
|
||||
|
||||
// Subscribe creates a subscription for the given client. It accepts a channel
|
||||
// on which messages matching the given query can be received. An error will be
|
||||
// returned to the caller if the context is canceled or if subscription already
|
||||
// exist for pair clientID and query.
|
||||
func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error {
|
||||
s.mtx.RLock()
|
||||
clientSubscriptions, ok := s.subscriptions[clientID]
|
||||
if ok {
|
||||
_, ok = clientSubscriptions[query.String()]
|
||||
}
|
||||
s.mtx.RUnlock()
|
||||
if ok {
|
||||
return ErrAlreadySubscribed
|
||||
}
|
||||
|
||||
select {
|
||||
case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}:
|
||||
s.mtx.Lock()
|
||||
if _, ok = s.subscriptions[clientID]; !ok {
|
||||
s.subscriptions[clientID] = make(map[string]Query)
|
||||
}
|
||||
s.subscriptions[clientID][query.String()] = query
|
||||
s.mtx.Unlock()
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Unsubscribe removes the subscription on the given query. An error will be
|
||||
// returned to the caller if the context is canceled or if subscription does
|
||||
// not exist.
|
||||
func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error {
|
||||
var origQuery Query
|
||||
s.mtx.RLock()
|
||||
clientSubscriptions, ok := s.subscriptions[clientID]
|
||||
if ok {
|
||||
origQuery, ok = clientSubscriptions[query.String()]
|
||||
}
|
||||
s.mtx.RUnlock()
|
||||
if !ok {
|
||||
return ErrSubscriptionNotFound
|
||||
}
|
||||
|
||||
// original query is used here because we're using pointers as map keys
|
||||
select {
|
||||
case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}:
|
||||
s.mtx.Lock()
|
||||
delete(clientSubscriptions, query.String())
|
||||
s.mtx.Unlock()
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// UnsubscribeAll removes all client subscriptions. An error will be returned
|
||||
// to the caller if the context is canceled or if subscription does not exist.
|
||||
func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
|
||||
s.mtx.RLock()
|
||||
_, ok := s.subscriptions[clientID]
|
||||
s.mtx.RUnlock()
|
||||
if !ok {
|
||||
return ErrSubscriptionNotFound
|
||||
}
|
||||
|
||||
select {
|
||||
case s.cmds <- cmd{op: unsub, clientID: clientID}:
|
||||
s.mtx.Lock()
|
||||
delete(s.subscriptions, clientID)
|
||||
s.mtx.Unlock()
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Publish publishes the given message. An error will be returned to the caller
|
||||
// if the context is canceled.
|
||||
func (s *Server) Publish(ctx context.Context, msg interface{}) error {
|
||||
return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]interface{})))
|
||||
}
|
||||
|
||||
// PublishWithTags publishes the given message with the set of tags. The set is
|
||||
// matched with clients queries. If there is a match, the message is sent to
|
||||
// the client.
|
||||
func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error {
|
||||
select {
|
||||
case s.cmds <- cmd{op: pub, msg: msg, tags: tags}:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// OnStop implements Service.OnStop by shutting down the server.
|
||||
func (s *Server) OnStop() {
|
||||
s.cmds <- cmd{op: shutdown}
|
||||
}
|
||||
|
||||
// NOTE: not goroutine safe
|
||||
type state struct {
|
||||
// query -> client -> ch
|
||||
queries map[Query]map[string]chan<- interface{}
|
||||
// client -> query -> struct{}
|
||||
clients map[string]map[Query]struct{}
|
||||
}
|
||||
|
||||
// OnStart implements Service.OnStart by starting the server.
|
||||
func (s *Server) OnStart() error {
|
||||
go s.loop(state{
|
||||
queries: make(map[Query]map[string]chan<- interface{}),
|
||||
clients: make(map[string]map[Query]struct{}),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnReset implements Service.OnReset
|
||||
func (s *Server) OnReset() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) loop(state state) {
|
||||
loop:
|
||||
for cmd := range s.cmds {
|
||||
switch cmd.op {
|
||||
case unsub:
|
||||
if cmd.query != nil {
|
||||
state.remove(cmd.clientID, cmd.query)
|
||||
} else {
|
||||
state.removeAll(cmd.clientID)
|
||||
}
|
||||
case shutdown:
|
||||
for clientID := range state.clients {
|
||||
state.removeAll(clientID)
|
||||
}
|
||||
break loop
|
||||
case sub:
|
||||
state.add(cmd.clientID, cmd.query, cmd.ch)
|
||||
case pub:
|
||||
state.send(cmd.msg, cmd.tags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (state *state) add(clientID string, q Query, ch chan<- interface{}) {
|
||||
// add query if needed
|
||||
if _, ok := state.queries[q]; !ok {
|
||||
state.queries[q] = make(map[string]chan<- interface{})
|
||||
}
|
||||
|
||||
// create subscription
|
||||
state.queries[q][clientID] = ch
|
||||
|
||||
// add client if needed
|
||||
if _, ok := state.clients[clientID]; !ok {
|
||||
state.clients[clientID] = make(map[Query]struct{})
|
||||
}
|
||||
state.clients[clientID][q] = struct{}{}
|
||||
}
|
||||
|
||||
func (state *state) remove(clientID string, q Query) {
|
||||
clientToChannelMap, ok := state.queries[q]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
ch, ok := clientToChannelMap[clientID]
|
||||
if ok {
|
||||
close(ch)
|
||||
|
||||
delete(state.clients[clientID], q)
|
||||
|
||||
// if it not subscribed to anything else, remove the client
|
||||
if len(state.clients[clientID]) == 0 {
|
||||
delete(state.clients, clientID)
|
||||
}
|
||||
|
||||
delete(state.queries[q], clientID)
|
||||
}
|
||||
}
|
||||
|
||||
func (state *state) removeAll(clientID string) {
|
||||
queryMap, ok := state.clients[clientID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for q := range queryMap {
|
||||
ch := state.queries[q][clientID]
|
||||
close(ch)
|
||||
|
||||
delete(state.queries[q], clientID)
|
||||
}
|
||||
|
||||
delete(state.clients, clientID)
|
||||
}
|
||||
|
||||
func (state *state) send(msg interface{}, tags TagMap) {
|
||||
for q, clientToChannelMap := range state.queries {
|
||||
if q.Matches(tags) {
|
||||
for _, ch := range clientToChannelMap {
|
||||
ch <- msg
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,252 +0,0 @@
|
|||
package pubsub_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
"github.com/tendermint/tmlibs/pubsub"
|
||||
"github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
const (
|
||||
clientID = "test-client"
|
||||
)
|
||||
|
||||
func TestSubscribe(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
ch := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, clientID, query.Empty{}, ch)
|
||||
require.NoError(t, err)
|
||||
err = s.Publish(ctx, "Ka-Zar")
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Ka-Zar", ch)
|
||||
|
||||
err = s.Publish(ctx, "Quicksilver")
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Quicksilver", ch)
|
||||
}
|
||||
|
||||
func TestDifferentClients(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
ch1 := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Iceman", ch1)
|
||||
|
||||
ch2 := make(chan interface{}, 1)
|
||||
err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Ultimo", ch1)
|
||||
assertReceive(t, "Ultimo", ch2)
|
||||
|
||||
ch3 := make(chan interface{}, 1)
|
||||
err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewRoundStep"}))
|
||||
require.NoError(t, err)
|
||||
assert.Zero(t, len(ch3))
|
||||
}
|
||||
|
||||
func TestClientSubscribesTwice(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
q := query.MustParse("tm.events.type='NewBlock'")
|
||||
|
||||
ch1 := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, clientID, q, ch1)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Goblin Queen", ch1)
|
||||
|
||||
ch2 := make(chan interface{}, 1)
|
||||
err = s.Subscribe(ctx, clientID, q, ch2)
|
||||
require.Error(t, err)
|
||||
|
||||
err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Spider-Man", ch1)
|
||||
}
|
||||
|
||||
func TestUnsubscribe(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
ch := make(chan interface{})
|
||||
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch)
|
||||
require.NoError(t, err)
|
||||
err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.Publish(ctx, "Nick Fury")
|
||||
require.NoError(t, err)
|
||||
assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe")
|
||||
|
||||
_, ok := <-ch
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func TestResubscribe(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
ch := make(chan interface{})
|
||||
err := s.Subscribe(ctx, clientID, query.Empty{}, ch)
|
||||
require.NoError(t, err)
|
||||
err = s.Unsubscribe(ctx, clientID, query.Empty{})
|
||||
require.NoError(t, err)
|
||||
ch = make(chan interface{})
|
||||
err = s.Subscribe(ctx, clientID, query.Empty{}, ch)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.Publish(ctx, "Cable")
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Cable", ch)
|
||||
}
|
||||
|
||||
func TestUnsubscribeAll(t *testing.T) {
|
||||
s := pubsub.NewServer()
|
||||
s.SetLogger(log.TestingLogger())
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1)
|
||||
require.NoError(t, err)
|
||||
err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.UnsubscribeAll(ctx, clientID)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.Publish(ctx, "Nick Fury")
|
||||
require.NoError(t, err)
|
||||
assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll")
|
||||
assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll")
|
||||
|
||||
_, ok := <-ch1
|
||||
assert.False(t, ok)
|
||||
_, ok = <-ch2
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func TestBufferCapacity(t *testing.T) {
|
||||
s := pubsub.NewServer(pubsub.BufferCapacity(2))
|
||||
s.SetLogger(log.TestingLogger())
|
||||
|
||||
assert.Equal(t, 2, s.BufferCapacity())
|
||||
|
||||
ctx := context.Background()
|
||||
err := s.Publish(ctx, "Nighthawk")
|
||||
require.NoError(t, err)
|
||||
err = s.Publish(ctx, "Sage")
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
|
||||
defer cancel()
|
||||
err = s.Publish(ctx, "Ironclad")
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, context.DeadlineExceeded, err)
|
||||
}
|
||||
}
|
||||
|
||||
func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) }
|
||||
func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) }
|
||||
func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) }
|
||||
|
||||
func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) }
|
||||
func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) }
|
||||
func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) }
|
||||
|
||||
func benchmarkNClients(n int, b *testing.B) {
|
||||
s := pubsub.NewServer()
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
for i := 0; i < n; i++ {
|
||||
ch := make(chan interface{})
|
||||
go func() {
|
||||
for range ch {
|
||||
}
|
||||
}()
|
||||
s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i}))
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkNClientsOneQuery(n int, b *testing.B) {
|
||||
s := pubsub.NewServer()
|
||||
s.Start()
|
||||
defer s.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1")
|
||||
for i := 0; i < n; i++ {
|
||||
ch := make(chan interface{})
|
||||
go func() {
|
||||
for range ch {
|
||||
}
|
||||
}()
|
||||
s.Subscribe(ctx, clientID, q, ch)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1}))
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
/// HELPERS
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// assertReceive waits up to one second for a message on ch and asserts it
// equals expected. On timeout it fails the test and prints a stack trace to
// help locate the stalled sender.
func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) {
	select {
	case actual := <-ch:
		// NOTE(review): a nil value received from the channel skips the
		// equality assertion entirely, even when expected is non-nil —
		// confirm this leniency is intended.
		if actual != nil {
			assert.Equal(t, expected, actual, msgAndArgs...)
		}
	case <-time.After(1 * time.Second):
		t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected)
		debug.PrintStack()
	}
}
|
|
@ -1,11 +0,0 @@
|
|||
# Regenerate the PEG query parser (query.peg -> generated Go source).
gen_query_parser:
	@go get github.com/pointlander/peg
	peg -inline -switch query.peg

# Build and run the go-fuzz harness for the query parser.
fuzzy_test:
	@go get github.com/dvyukov/go-fuzz/go-fuzz
	@go get github.com/dvyukov/go-fuzz/go-fuzz-build
	go-fuzz-build github.com/tendermint/tmlibs/pubsub/query/fuzz_test
	go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output

.PHONY: gen_query_parser fuzzy_test
|
|
@ -1,16 +0,0 @@
|
|||
package query

import "github.com/tendermint/tmlibs/pubsub"

// Empty query matches any set of tags.
type Empty struct {
}

// Matches always returns true.
func (Empty) Matches(tags pubsub.TagMap) bool {
	return true
}

// String returns the fixed representation "empty".
func (Empty) String() string {
	return "empty"
}
|
|
@ -1,17 +0,0 @@
|
|||
package query_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/tendermint/tmlibs/pubsub"
|
||||
"github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
// TestEmptyQueryMatchesAnything checks that Empty matches tag maps of any
// size and value type, including the empty map.
func TestEmptyQueryMatchesAnything(t *testing.T) {
	q := query.Empty{}
	assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{})))
	assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Asher": "Roth"})))
	assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66})))
	assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66, "Billy": "Blue"})))
}
|
|
@ -1,30 +0,0 @@
|
|||
package fuzz_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
// Fuzz is the go-fuzz entry point. It checks that any string the parser
// accepts round-trips: parse -> String -> parse must succeed and produce a
// stable string.
//
// Return values follow go-fuzz conventions: 1 marks the input as interesting
// (it parsed), 0 as uninteresting (rejected by the parser).
func Fuzz(data []byte) int {
	sdata := string(data)
	q0, err := query.New(sdata)
	if err != nil {
		return 0
	}

	sdata1 := q0.String()
	q1, err := query.New(sdata1)
	if err != nil {
		// A string we produced ourselves must always re-parse.
		panic(err)
	}

	sdata2 := q1.String()
	if sdata1 != sdata2 {
		fmt.Printf("q0: %q\n", sdata1)
		fmt.Printf("q1: %q\n", sdata2)
		panic("query changed")
	}

	return 1
}
|
|
@ -1,91 +0,0 @@
|
|||
package query_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
// TODO: fuzzy testing?
|
||||
// TestParser table-tests query.New against valid and invalid query strings:
// bare equality, AND chains, CONTAINS, DATE/TIME literals, and numbers.
// It only checks accept/reject, not the parsed structure (see TestConditions).
func TestParser(t *testing.T) {
	cases := []struct {
		query string
		valid bool
	}{
		{"tm.events.type='NewBlock'", true},
		{"tm.events.type = 'NewBlock'", true},
		{"tm.events.name = ''", true},
		// keywords inside quotes are plain values, not DATE/TIME literals
		{"tm.events.type='TIME'", true},
		{"tm.events.type='DATE'", true},
		{"tm.events.type='='", true},
		{"tm.events.type='TIME", false},
		{"tm.events.type=TIME'", false},
		{"tm.events.type==", false},
		// unquoted strings are rejected
		{"tm.events.type=NewBlock", false},
		{">==", false},
		{"tm.events.type 'NewBlock' =", false},
		{"tm.events.type>'NewBlock'", false},
		{"", false},
		{"=", false},
		{"='NewBlock'", false},
		{"tm.events.type=", false},

		{"tm.events.typeNewBlock", false},
		{"tm.events.type'NewBlock'", false},
		{"'NewBlock'", false},
		{"NewBlock", false},
		{"", false},

		{"tm.events.type='NewBlock' AND abci.account.name='Igor'", true},
		{"tm.events.type='NewBlock' AND", false},
		{"tm.events.type='NewBlock' AN", false},
		{"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false},
		{"AND tm.events.type='NewBlock' ", false},

		{"abci.account.name CONTAINS 'Igor'", true},

		{"tx.date > DATE 2013-05-03", true},
		{"tx.date < DATE 2013-05-03", true},
		{"tx.date <= DATE 2013-05-03", true},
		{"tx.date >= DATE 2013-05-03", true},
		{"tx.date >= DAT 2013-05-03", false},
		{"tx.date <= DATE2013-05-03", false},
		{"tx.date <= DATE -05-03", false},
		{"tx.date >= DATE 20130503", false},
		{"tx.date >= DATE 2013+01-03", false},
		// incorrect year, month, day
		{"tx.date >= DATE 0013-01-03", false},
		{"tx.date >= DATE 2013-31-03", false},
		{"tx.date >= DATE 2013-01-83", false},

		{"tx.date > TIME 2013-05-03T14:45:00+07:00", true},
		{"tx.date < TIME 2013-05-03T14:45:00-02:00", true},
		{"tx.date <= TIME 2013-05-03T14:45:00Z", true},
		{"tx.date >= TIME 2013-05-03T14:45:00Z", true},
		{"tx.date >= TIME2013-05-03T14:45:00Z", false},
		{"tx.date = IME 2013-05-03T14:45:00Z", false},
		{"tx.date = TIME 2013-05-:45:00Z", false},
		// timezone/offset is mandatory in TIME literals
		{"tx.date >= TIME 2013-05-03T14:45:00", false},
		{"tx.date >= TIME 0013-00-00T14:45:00Z", false},
		{"tx.date >= TIME 2013+05=03T14:45:00Z", false},

		{"account.balance=100", true},
		{"account.balance >= 200", true},
		// negative numbers are not in the grammar
		{"account.balance >= -300", false},
		{"account.balance >>= 400", false},
		{"account.balance=33.22.1", false},

		{"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true},
		{"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false},
	}

	for _, c := range cases {
		_, err := query.New(c.query)
		if c.valid {
			assert.NoErrorf(t, err, "Query was '%s'", c.query)
		} else {
			assert.Errorf(t, err, "Query was '%s'", c.query)
		}
	}
}
|
|
@ -1,345 +0,0 @@
|
|||
// Package query provides a parser for a custom query format:
|
||||
//
|
||||
// abci.invoice.number=22 AND abci.invoice.owner=Ivan
|
||||
//
|
||||
// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar.
|
||||
// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics
|
||||
//
|
||||
// It has a support for numbers (integer and floating point), dates and times.
|
||||
package query
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tmlibs/pubsub"
|
||||
)
|
||||
|
||||
// Query holds the query string and the query parser.
type Query struct {
	str    string       // the original string passed to New, returned by String
	parser *QueryParser // generated PEG parser; holds the token stream walked by Conditions/Matches
}

// Condition represents a single condition within a query and consists of tag
// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7").
type Condition struct {
	Tag     string
	Op      Operator
	Operand interface{} // string, int64, float64 or time.Time depending on the literal
}
|
||||
|
||||
// New parses the given string and returns a query or error if the string is
|
||||
// invalid.
|
||||
func New(s string) (*Query, error) {
|
||||
p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)}
|
||||
p.Init()
|
||||
if err := p.Parse(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Query{str: s, parser: p}, nil
|
||||
}
|
||||
|
||||
// MustParse turns the given string into a query or panics; for tests or others
|
||||
// cases where you know the string is valid.
|
||||
func MustParse(s string) *Query {
|
||||
q, err := New(s)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to parse %s: %v", s, err))
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// String returns the original string.
// This is the exact string passed to New, not a re-serialization of the
// parsed structure.
func (q *Query) String() string {
	return q.str
}
|
||||
|
||||
// Operator is an operator that defines some kind of relation between tag and
// operand (equality, etc.).
// The zero value is OpLessEqual; there is no explicit "invalid" operator.
type Operator uint8

const (
	// "<="
	OpLessEqual Operator = iota
	// ">="
	OpGreaterEqual
	// "<"
	OpLess
	// ">"
	OpGreater
	// "="
	OpEqual
	// "CONTAINS"; used to check if a string contains a certain sub string.
	OpContains
)
|
||||
|
||||
// Conditions returns a list of conditions.
//
// It walks the parser's token stream and relies on token ordering: a
// rulePegText span sets (begin, end), then a tag/operator/operand rule
// interprets that span. Operand rules (value/number/time/date) complete a
// Condition using the most recently seen tag and operator.
//
// Panics on number/date/time parse failures, which cannot occur for a buffer
// the grammar accepted.
func (q *Query) Conditions() []Condition {
	conditions := make([]Condition, 0)

	buffer, begin, end := q.parser.Buffer, 0, 0

	var tag string
	var op Operator

	// tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7")
	for _, token := range q.parser.Tokens() {
		switch token.pegRule {

		case rulePegText:
			// remember the text span; interpreted by the next rule below
			begin, end = int(token.begin), int(token.end)
		case ruletag:
			tag = buffer[begin:end]
		case rulele:
			op = OpLessEqual
		case rulege:
			op = OpGreaterEqual
		case rulel:
			op = OpLess
		case ruleg:
			op = OpGreater
		case ruleequal:
			op = OpEqual
		case rulecontains:
			op = OpContains
		case rulevalue:
			// strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock")
			valueWithoutSingleQuotes := buffer[begin+1 : end-1]
			conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes})
		case rulenumber:
			number := buffer[begin:end]
			if strings.Contains(number, ".") { // if it looks like a floating-point number
				value, err := strconv.ParseFloat(number, 64)
				if err != nil {
					panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
				}
				conditions = append(conditions, Condition{tag, op, value})
			} else {
				value, err := strconv.ParseInt(number, 10, 64)
				if err != nil {
					panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number))
				}
				conditions = append(conditions, Condition{tag, op, value})
			}
		case ruletime:
			value, err := time.Parse(time.RFC3339, buffer[begin:end])
			if err != nil {
				panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
			}
			conditions = append(conditions, Condition{tag, op, value})
		case ruledate:
			value, err := time.Parse("2006-01-02", buffer[begin:end])
			if err != nil {
				panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end]))
			}
			conditions = append(conditions, Condition{tag, op, value})
		}
	}

	return conditions
}
|
||||
|
||||
// Matches returns true if the query matches the given set of tags, false otherwise.
//
// For example, query "name=John" matches tags = {"name": "John"}. More
// examples could be found in parser_test.go and query_test.go.
//
// Implementation mirrors Conditions: it walks the token stream in
// tag -> operator -> operand order, but instead of collecting Conditions it
// evaluates each (tag, op, operand) triplet against tags via match, failing
// fast on the first non-matching condition (AND semantics).
func (q *Query) Matches(tags pubsub.TagMap) bool {
	// an empty tag set can never satisfy a condition
	if tags.Len() == 0 {
		return false
	}

	buffer, begin, end := q.parser.Buffer, 0, 0

	var tag string
	var op Operator

	// tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7")
	for _, token := range q.parser.Tokens() {
		switch token.pegRule {

		case rulePegText:
			// remember the text span; interpreted by the next rule below
			begin, end = int(token.begin), int(token.end)
		case ruletag:
			tag = buffer[begin:end]
		case rulele:
			op = OpLessEqual
		case rulege:
			op = OpGreaterEqual
		case rulel:
			op = OpLess
		case ruleg:
			op = OpGreater
		case ruleequal:
			op = OpEqual
		case rulecontains:
			op = OpContains
		case rulevalue:
			// strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock")
			valueWithoutSingleQuotes := buffer[begin+1 : end-1]

			// see if the triplet (tag, operator, operand) matches any tag
			// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" }
			if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) {
				return false
			}
		case rulenumber:
			number := buffer[begin:end]
			if strings.Contains(number, ".") { // if it looks like a floating-point number
				value, err := strconv.ParseFloat(number, 64)
				if err != nil {
					panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
				}
				if !match(tag, op, reflect.ValueOf(value), tags) {
					return false
				}
			} else {
				value, err := strconv.ParseInt(number, 10, 64)
				if err != nil {
					panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number))
				}
				if !match(tag, op, reflect.ValueOf(value), tags) {
					return false
				}
			}
		case ruletime:
			value, err := time.Parse(time.RFC3339, buffer[begin:end])
			if err != nil {
				panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
			}
			if !match(tag, op, reflect.ValueOf(value), tags) {
				return false
			}
		case ruledate:
			value, err := time.Parse("2006-01-02", buffer[begin:end])
			if err != nil {
				panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end]))
			}
			if !match(tag, op, reflect.ValueOf(value), tags) {
				return false
			}
		}
	}

	return true
}
|
||||
|
||||
// match returns true if the given triplet (tag, operator, operand) matches any tag.
//
// First, it looks up the tag in tags and if it finds one, tries to compare the
// value from it to the operand using the operator.
//
// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" }
//
// Dispatch is on the operand's reflect.Kind (time.Time -> Struct,
// float64 -> Float64, int64 -> Int64, string -> String). Missing tags and
// type mismatches on time/string return false; incomparable numeric tag
// values panic. Operator/kind combinations without a case (e.g. CONTAINS on
// a number) fall through to the final "return false".
func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool {
	// look up the tag from the query in tags
	value, ok := tags.Get(tag)
	if !ok {
		return false
	}
	switch operand.Kind() {
	case reflect.Struct: // time
		operandAsTime := operand.Interface().(time.Time)
		v, ok := value.(time.Time)
		if !ok { // if value from tags is not time.Time
			return false
		}
		switch op {
		case OpLessEqual:
			return v.Before(operandAsTime) || v.Equal(operandAsTime)
		case OpGreaterEqual:
			return v.Equal(operandAsTime) || v.After(operandAsTime)
		case OpLess:
			return v.Before(operandAsTime)
		case OpGreater:
			return v.After(operandAsTime)
		case OpEqual:
			return v.Equal(operandAsTime)
		}
	case reflect.Float64:
		operandFloat64 := operand.Interface().(float64)
		var v float64
		// try our best to convert value from tags to float64
		switch vt := value.(type) {
		case float64:
			v = vt
		case float32:
			v = float64(vt)
		case int:
			v = float64(vt)
		case int8:
			v = float64(vt)
		case int16:
			v = float64(vt)
		case int32:
			v = float64(vt)
		case int64:
			v = float64(vt)
		default: // fail for all other types
			panic(fmt.Sprintf("Incomparable types: %T (%v) vs float64 (%v)", value, value, operandFloat64))
		}
		switch op {
		case OpLessEqual:
			return v <= operandFloat64
		case OpGreaterEqual:
			return v >= operandFloat64
		case OpLess:
			return v < operandFloat64
		case OpGreater:
			return v > operandFloat64
		case OpEqual:
			return v == operandFloat64
		}
	case reflect.Int64:
		operandInt := operand.Interface().(int64)
		var v int64
		// try our best to convert value from tags to int64
		// NOTE(review): float-to-int64 conversion truncates the fractional
		// part, so e.g. tag value 4.0 matches "= 4" but 4.5 matches "= 4" too
		// only after truncation — confirm this is intended.
		switch vt := value.(type) {
		case int64:
			v = vt
		case int8:
			v = int64(vt)
		case int16:
			v = int64(vt)
		case int32:
			v = int64(vt)
		case int:
			v = int64(vt)
		case float64:
			v = int64(vt)
		case float32:
			v = int64(vt)
		default: // fail for all other types
			panic(fmt.Sprintf("Incomparable types: %T (%v) vs int64 (%v)", value, value, operandInt))
		}
		switch op {
		case OpLessEqual:
			return v <= operandInt
		case OpGreaterEqual:
			return v >= operandInt
		case OpLess:
			return v < operandInt
		case OpGreater:
			return v > operandInt
		case OpEqual:
			return v == operandInt
		}
	case reflect.String:
		v, ok := value.(string)
		if !ok { // if value from tags is not string
			return false
		}
		switch op {
		case OpEqual:
			return v == operand.String()
		case OpContains:
			return strings.Contains(v, operand.String())
		}
	default:
		panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind()))
	}

	return false
}
|
|
@ -1,33 +0,0 @@
|
|||
package query
|
||||
|
||||
type QueryParser Peg {
}

# Entry rule: a double-quoted sequence of conditions joined by AND;
# `!.` requires that the whole buffer is consumed.
e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !.

# A condition is tag, operator, operand. Ordering operators accept only
# number/time/date; '=' additionally accepts quoted values; CONTAINS
# accepts only quoted values.
condition <- tag ' '* (le ' '* (number / time / date)
           / ge ' '* (number / time / date)
           / l ' '* (number / time / date)
           / g ' '* (number / time / date)
           / equal ' '* (number / time / date / value)
           / contains ' '* value
           )

# `< ... >` captures the matched text for the Go token walker.
tag <- < (![ \t\n\r\\()"'=><] .)+ >
value <- < '\'' (!["'] .)* '\''>
number <- < ('0'
           / [1-9] digit* ('.' digit*)?) >
digit <- [0-9]
time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') >
date <- "DATE " < year '-' month '-' day >
year <- ('1' / '2') digit digit digit
month <- ('0' / '1') digit
day <- ('0' / '1' / '2' / '3') digit
and <- "AND"

equal <- "="
contains <- "CONTAINS"
le <- "<="
ge <- ">="
l <- "<"
g <- ">"
|
File diff suppressed because it is too large
Load Diff
|
@ -1,86 +0,0 @@
|
|||
package query_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tmlibs/pubsub"
|
||||
"github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
// TestMatches table-tests Query.Matches across value kinds: strings, ints,
// floats (including int/float cross-type comparisons), DATE and TIME
// literals, and CONTAINS.
func TestMatches(t *testing.T) {
	const shortForm = "2006-Jan-02"
	txDate, err := time.Parse(shortForm, "2017-Jan-01")
	require.NoError(t, err)
	txTime, err := time.Parse(time.RFC3339, "2018-05-03T14:45:00Z")
	require.NoError(t, err)

	testCases := []struct {
		s       string
		tags    map[string]interface{}
		err     bool // NOTE(review): no case sets this; the error branch below is never exercised
		matches bool
	}{
		{"tm.events.type='NewBlock'", map[string]interface{}{"tm.events.type": "NewBlock"}, false, true},

		{"tx.gas > 7", map[string]interface{}{"tx.gas": 8}, false, true},
		{"tx.gas > 7 AND tx.gas < 9", map[string]interface{}{"tx.gas": 8}, false, true},
		{"body.weight >= 3.5", map[string]interface{}{"body.weight": 3.5}, false, true},
		// cross-type comparisons: int tag vs float operand and vice versa
		{"account.balance < 1000.0", map[string]interface{}{"account.balance": 900}, false, true},
		{"apples.kg <= 4", map[string]interface{}{"apples.kg": 4.0}, false, true},
		{"body.weight >= 4.5", map[string]interface{}{"body.weight": float32(4.5)}, false, true},
		{"oranges.kg < 4 AND watermellons.kg > 10", map[string]interface{}{"oranges.kg": 3, "watermellons.kg": 12}, false, true},
		{"peaches.kg < 4", map[string]interface{}{"peaches.kg": 5}, false, false},

		{"tx.date > DATE 2017-01-01", map[string]interface{}{"tx.date": time.Now()}, false, true},
		{"tx.date = DATE 2017-01-01", map[string]interface{}{"tx.date": txDate}, false, true},
		{"tx.date = DATE 2018-01-01", map[string]interface{}{"tx.date": txDate}, false, false},

		{"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": time.Now()}, false, true},
		{"tx.time = TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": txTime}, false, false},

		{"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Igor,Ivan"}, false, true},
		{"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Pavel,Ivan"}, false, false},
	}

	for _, tc := range testCases {
		q, err := query.New(tc.s)
		if !tc.err {
			require.Nil(t, err)
		}

		if tc.matches {
			assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags)
		} else {
			assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags)
		}
	}
}
|
||||
|
||||
// TestMustParse checks that MustParse panics on an invalid query and does not
// panic on a valid one.
func TestMustParse(t *testing.T) {
	assert.Panics(t, func() { query.MustParse("=") })
	assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") })
}
|
||||
|
||||
// TestConditions checks that Query.Conditions extracts the expected
// (tag, operator, operand) triplets, including operand types: string for
// quoted values, int64 for integers, and time.Time for TIME literals.
func TestConditions(t *testing.T) {
	txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z")
	require.NoError(t, err)

	testCases := []struct {
		s          string
		conditions []query.Condition
	}{
		{s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}},
		{s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}},
		{s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}},
	}

	for _, tc := range testCases {
		q, err := query.New(tc.s)
		require.Nil(t, err)

		assert.Equal(t, tc.conditions, q.Conditions())
	}
}
|
6
test.sh
6
test.sh
|
@ -4,6 +4,9 @@ set -e
|
|||
# run the linter
|
||||
# make metalinter_test
|
||||
|
||||
# setup certs
|
||||
make gen_certs
|
||||
|
||||
# run the unit tests with coverage
|
||||
echo "" > coverage.txt
|
||||
for d in $(go list ./... | grep -v vendor); do
|
||||
|
@ -13,3 +16,6 @@ for d in $(go list ./... | grep -v vendor); do
|
|||
rm profile.out
|
||||
fi
|
||||
done
|
||||
|
||||
# cleanup certs
|
||||
make clean_certs
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
package version
|
||||
|
||||
const Version = "0.8.4"
|
||||
const Version = "0.9.0"
|
||||
|
|
Loading…
Reference in New Issue