move block cache from memory to files
parent f4ad7146d6
commit 1ff6ea4091
.gitignore
@ -1,5 +1,7 @@
first-make-timestamp
lightwalletd
db-*-blocks
db-*-lengths
server.log
coverage.out
test-log

README.md (23)
|
@ -105,6 +105,29 @@ Example using server binary built from Makefile:
./server --tls-cert cert.pem --tls-key key.pem --conf-file /home/zcash/.zcash/zcash.conf --log-file /logs/server.log --bind-addr 127.0.0.1:18232
```

## Block cache

Lightwalletd caches all blocks from Sapling activation up to the
most recent block, which takes about an hour the first time you run
lightwalletd. During this syncing, lightwalletd is fully available; the
only effect of being in download mode is that block fetches are slower.

After this initial sync, lightwalletd will start almost immediately on
subsequent runs, because the blocks are cached in local files (by default,
within `/var/lib/lightwalletd/db`; you can specify a different location using
the `--data-dir` command-line option).
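
For orientation, here is a minimal sketch (not part of the diff itself) of where those cache files end up on disk, mirroring the `dbFileNames` helper added in `common/cache.go`; the chain name `main` is only an assumption about what zcashd reports for mainnet:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// cacheFileNames mirrors dbFileNames() from this commit: the cache lives in
// <data-dir>/db/<chain>/lengths and <data-dir>/db/<chain>/blocks.
func cacheFileNames(dataDir, chainName string) (string, string) {
	dbPath := filepath.Join(dataDir, "db")
	return filepath.Join(dbPath, chainName, "lengths"),
		filepath.Join(dbPath, chainName, "blocks")
}

func main() {
	lengths, blocks := cacheFileNames("/var/lib/lightwalletd", "main")
	fmt.Println(lengths) // /var/lib/lightwalletd/db/main/lengths
	fmt.Println(blocks)  // /var/lib/lightwalletd/db/main/blocks
}
```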

Lightwalletd checks the consistency of these files at startup and during
operation. If it detects corruption (as might be caused by an unclean
shutdown), it recreates the cache by re-downloading all blocks from
`zcashd`, which again takes about an hour, but this should occur
extremely rarely.

If lightwalletd detects corruption in these cache files, it will log
a message containing the string `CORRUPTION` and also indicate the
nature of the corruption.
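
That detection rests on a per-record checksum. The following is a self-contained copy, shown here only for illustration, of the scheme added in `common/cache.go`: FNV-64a computed over the 8-byte little-endian block height followed by the marshalled block bytes, stored immediately before each block in the blocks file.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// checksum reproduces the 8-byte value this commit writes before each
// marshalled block in the blocks file. A mismatch when the record is read
// back is what triggers the CORRUPTION log path.
func checksum(height int, b []byte) []byte {
	h := make([]byte, 8)
	binary.LittleEndian.PutUint64(h, uint64(height))
	cs := fnv.New64a()
	cs.Write(h)
	cs.Write(b)
	return cs.Sum(nil)
}

func main() {
	// Example bytes only; a real record holds a protobuf-marshalled CompactBlock.
	fmt.Printf("%x\n", checksum(289460, []byte("example-block-bytes")))
}
```
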
# Pull Requests
|
||||
|
||||
We welcome pull requests! We like to keep our Go code neatly formatted in a standard way,
|
||||
|
|
cmd/root.go (41)
|
@ -5,6 +5,7 @@ import (
|
|||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
@ -40,7 +41,8 @@ var rootCmd = &cobra.Command{
|
|||
LogFile: viper.GetString("log-file"),
|
||||
ZcashConfPath: viper.GetString("zcash-conf-path"),
|
||||
NoTLSVeryInsecure: viper.GetBool("no-tls-very-insecure"),
|
||||
CacheSize: viper.GetInt("cache-size"),
|
||||
DataDir: viper.GetString("data-dir"),
|
||||
Redownload: viper.GetBool("redownload"),
|
||||
}
|
||||
|
||||
common.Log.Debugf("Options: %#v\n", opts)
|
||||
|
@ -52,10 +54,10 @@ var rootCmd = &cobra.Command{
|
|||
opts.ZcashConfPath,
|
||||
}
|
||||
|
||||
if !fileExists(opts.LogFile) {
|
||||
os.OpenFile(opts.LogFile, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
|
||||
}
|
||||
for _, filename := range filesThatShouldExist {
|
||||
if !fileExists(opts.LogFile) {
|
||||
os.OpenFile(opts.LogFile, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
|
||||
}
|
||||
if opts.NoTLSVeryInsecure && (filename == opts.TLSCertPath || filename == opts.TLSKeyPath) {
|
||||
continue
|
||||
}
|
||||
|
@ -138,18 +140,19 @@ func startServer(opts *common.Options) error {
|
|||
// Get the sapling activation height from the RPC
|
||||
// (this first RPC also verifies that we can communicate with zcashd)
|
||||
saplingHeight, blockHeight, chainName, branchID := common.GetSaplingInfo()
|
||||
common.Log.Info("Got sapling height ", saplingHeight, " chain ", chainName, " branchID ", branchID)
|
||||
common.Log.Info("Got sapling height ", saplingHeight, " block height ", blockHeight, " chain ", chainName, " branchID ", branchID)
|
||||
|
||||
// Initialize the cache
|
||||
cache := common.NewBlockCache(opts.CacheSize)
|
||||
|
||||
// Start the block cache importer at cacheSize blocks before current height
|
||||
cacheStart := blockHeight - opts.CacheSize
|
||||
if cacheStart < saplingHeight {
|
||||
cacheStart = saplingHeight
|
||||
if err := os.MkdirAll(opts.DataDir, 0755); err != nil {
|
||||
os.Stderr.WriteString(fmt.Sprintf("\n ** Can't create data directory: %s\n\n", opts.DataDir))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
go common.BlockIngestor(cache, cacheStart, 0 /*loop forever*/)
|
||||
dbPath := filepath.Join(opts.DataDir, "db")
|
||||
if err := os.MkdirAll(dbPath, 0755); err != nil {
|
||||
os.Stderr.WriteString(fmt.Sprintf("\n ** Can't create db directory: %s\n\n", dbPath))
|
||||
os.Exit(1)
|
||||
}
|
||||
cache := common.NewBlockCache(dbPath, chainName, saplingHeight, opts.Redownload)
|
||||
go common.BlockIngestor(cache, 0 /*loop forever*/)
|
||||
|
||||
// Compact transaction service initialization
|
||||
service, err := frontend.NewLwdStreamer(cache)
|
||||
|
@ -176,6 +179,7 @@ func startServer(opts *common.Options) error {
|
|||
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
|
||||
go func() {
|
||||
s := <-signals
|
||||
cache.Sync()
|
||||
common.Log.WithFields(logrus.Fields{
|
||||
"signal": s.String(),
|
||||
}).Info("caught signal, stopping gRPC server")
|
||||
|
@ -217,7 +221,8 @@ func init() {
|
|||
rootCmd.Flags().String("log-file", "./server.log", "log file to write to")
|
||||
rootCmd.Flags().String("zcash-conf-path", "./zcash.conf", "conf file to pull RPC creds from")
|
||||
rootCmd.Flags().Bool("no-tls-very-insecure", false, "run without the required TLS certificate, only for debugging, DO NOT use in production")
|
||||
rootCmd.Flags().Int("cache-size", 80000, "number of blocks to hold in the cache")
|
||||
rootCmd.Flags().Bool("redownload", false, "re-fetch all blocks from zcashd; reinitialize local cache files")
|
||||
rootCmd.Flags().String("data-dir", "/var/lib/lightwalletd", "data directory (such as db)")
|
||||
|
||||
viper.BindPFlag("bind-addr", rootCmd.Flags().Lookup("bind-addr"))
|
||||
viper.SetDefault("bind-addr", "127.0.0.1:9067")
|
||||
|
@ -233,8 +238,10 @@ func init() {
|
|||
viper.SetDefault("zcash-conf-path", "./zcash.conf")
|
||||
viper.BindPFlag("no-tls-very-insecure", rootCmd.Flags().Lookup("no-tls-very-insecure"))
|
||||
viper.SetDefault("no-tls-very-insecure", false)
|
||||
viper.BindPFlag("cache-size", rootCmd.Flags().Lookup("cache-size"))
|
||||
viper.SetDefault("cache-size", 80000)
|
||||
viper.BindPFlag("redownload", rootCmd.Flags().Lookup("redownload"))
|
||||
viper.SetDefault("redownload", false)
|
||||
viper.BindPFlag("data-dir", rootCmd.Flags().Lookup("data-dir"))
|
||||
viper.SetDefault("data-dir", "/var/lib/lightwalletd")
|
||||
|
||||
logger.SetFormatter(&logrus.TextFormatter{
|
||||
//DisableColors: true,
|
||||
|
|
common/cache.go (330)
|
@ -5,6 +5,11 @@ package common
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash/fnv"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
@ -13,90 +18,276 @@ import (
|
|||
|
||||
type blockCacheEntry struct {
|
||||
data []byte
|
||||
hash []byte
|
||||
}
|
||||
|
||||
// BlockCache contains a set of recent compact blocks in marshalled form.
|
||||
// BlockCache contains a consecutive set of recent compact blocks in marshalled form.
|
||||
type BlockCache struct {
|
||||
MaxEntries int
|
||||
lengthsName, blocksName string // pathnames
|
||||
lengthsFile, blocksFile *os.File
|
||||
starts []int64 // Starting offset of each block within blocksFile
|
||||
firstBlock int // height of the first block in the cache (usually Sapling activation)
|
||||
nextBlock int // height of the first block not in the cache
|
||||
latestHash []byte // hash of the most recent (highest height) block, for detecting reorgs.
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
// m[firstBlock..nextBlock) are valid
|
||||
m map[int]*blockCacheEntry
|
||||
firstBlock int
|
||||
nextBlock int
|
||||
func (c *BlockCache) GetNextHeight() int {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
return c.nextBlock
|
||||
}
|
||||
|
||||
mutex sync.RWMutex
|
||||
func (c *BlockCache) GetLatestHash() []byte {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
return c.latestHash
|
||||
}
|
||||
|
||||
// HashMismatch indicates if the given prev-hash doesn't match the most recent block's hash
|
||||
// so reorgs can be detected.
|
||||
func (c *BlockCache) HashMismatch(prevhash []byte) bool {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
return c.latestHash != nil && !bytes.Equal(c.latestHash, prevhash)
|
||||
}
|
||||
|
||||
func (c *BlockCache) setDbFiles(height int) {
|
||||
index := height - c.firstBlock
|
||||
if err := c.lengthsFile.Truncate(int64(index * 4)); err != nil {
|
||||
Log.Fatal("truncate lengths file failed: ", err)
|
||||
}
|
||||
if err := c.blocksFile.Truncate(c.starts[index]); err != nil {
|
||||
Log.Fatal("truncate blocks file failed: ", err)
|
||||
}
|
||||
c.Sync()
|
||||
c.starts = c.starts[:index+1]
|
||||
c.nextBlock = height
|
||||
c.setLatestHash()
|
||||
}
|
||||
|
||||
func (c *BlockCache) recoverFromCorruption(height int) {
|
||||
Log.Warning("CORRUPTION detected in db blocks-cache files, height ", height, " redownloading")
|
||||
c.setDbFiles(height)
|
||||
}
|
||||
|
||||
// not including the checksum
|
||||
func (c *BlockCache) blockLength(height int) int {
|
||||
index := height - c.firstBlock
|
||||
return int(c.starts[index+1] - c.starts[index] - 8)
|
||||
}
|
||||
|
||||
// Calculate the 8-byte checksum that precedes each block in the blocks file.
|
||||
func checksum(height int, b []byte) []byte {
|
||||
h := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(h, uint64(height))
|
||||
cs := fnv.New64a()
|
||||
cs.Write(h)
|
||||
cs.Write(b)
|
||||
return cs.Sum(nil)
|
||||
}
|
||||
|
||||
func (c *BlockCache) readBlock(height int) *walletrpc.CompactBlock {
|
||||
blockLen := c.blockLength(height)
|
||||
b := make([]byte, blockLen+8)
|
||||
offset := c.starts[height-c.firstBlock]
|
||||
n, err := c.blocksFile.ReadAt(b, offset)
|
||||
if err != nil || n != len(b) {
|
||||
Log.Warning("blocks read offset: ", offset, " failed: ", n, err)
|
||||
return nil
|
||||
}
|
||||
diskcs := b[:8]
|
||||
b = b[8 : blockLen+8]
|
||||
if !bytes.Equal(checksum(height, b), diskcs) {
|
||||
Log.Warning("bad block checksum at height: ", height, " offset: ", offset)
|
||||
return nil
|
||||
}
|
||||
block := &walletrpc.CompactBlock{}
|
||||
err = proto.Unmarshal(b, block)
|
||||
if err != nil {
|
||||
// Could be file corruption.
|
||||
Log.Warning("blocks unmarshal at offset: ", offset, " failed: ", err)
|
||||
return nil
|
||||
}
|
||||
if int(block.Height) != height {
|
||||
// Could be file corruption.
|
||||
Log.Warning("block unexpected height at height ", height, " offset: ", offset)
|
||||
return nil
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
func (c *BlockCache) setLatestHash() {
|
||||
c.latestHash = nil
|
||||
// There is at least one block; get the last block's hash
|
||||
if c.nextBlock > c.firstBlock {
|
||||
// At least one block remains; get the last block's hash
|
||||
block := c.readBlock(c.nextBlock - 1)
|
||||
if block == nil {
|
||||
c.recoverFromCorruption(c.nextBlock - 1)
|
||||
return
|
||||
}
|
||||
c.latestHash = make([]byte, len(block.Hash))
|
||||
copy(c.latestHash, block.Hash)
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlockCache returns an instance of a block cache object.
|
||||
func NewBlockCache(maxEntries int) *BlockCache {
|
||||
return &BlockCache{
|
||||
MaxEntries: maxEntries,
|
||||
m: make(map[int]*blockCacheEntry),
|
||||
func NewBlockCache(dbPath string, chainName string, startHeight int, redownload bool) *BlockCache {
|
||||
c := &BlockCache{}
|
||||
c.firstBlock = startHeight
|
||||
c.nextBlock = startHeight
|
||||
c.lengthsName, c.blocksName = dbFileNames(dbPath, chainName)
|
||||
var err error
|
||||
if err := os.MkdirAll(filepath.Join(dbPath, chainName), 0755); err != nil {
|
||||
Log.Fatal("mkdir ", dbPath, " failed: ", err)
|
||||
}
|
||||
c.blocksFile, err = os.OpenFile(c.blocksName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
Log.Fatal("open ", c.blocksName, " failed: ", err)
|
||||
}
|
||||
c.lengthsFile, err = os.OpenFile(c.lengthsName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
Log.Fatal("open ", c.lengthsName, " failed: ", err)
|
||||
}
|
||||
if redownload {
|
||||
if err := c.lengthsFile.Truncate(0); err != nil {
|
||||
Log.Fatal("truncate lengths file failed: ", err)
|
||||
}
|
||||
if err := c.blocksFile.Truncate(0); err != nil {
|
||||
Log.Fatal("truncate blocks file failed: ", err)
|
||||
}
|
||||
}
|
||||
lengths, err := ioutil.ReadFile(c.lengthsName)
|
||||
if err != nil {
|
||||
Log.Fatal("read ", c.lengthsName, " failed: ", err)
|
||||
}
|
||||
|
||||
// The last entry in starts[] is where to write the next block.
|
||||
var offset int64
|
||||
c.starts = nil
|
||||
c.starts = append(c.starts, 0)
|
||||
for i := 0; i < len(lengths)/4; i++ {
|
||||
if len(lengths[:4]) < 4 {
|
||||
Log.Warning("lengths file has a partial entry")
|
||||
c.recoverFromCorruption(c.nextBlock)
|
||||
break
|
||||
}
|
||||
length := binary.LittleEndian.Uint32(lengths[i*4 : (i+1)*4])
|
||||
if length < 78 || length > 4*1000*1000 {
|
||||
Log.Warning("lengths file has impossible value ", length)
|
||||
c.recoverFromCorruption(c.nextBlock)
|
||||
break
|
||||
}
|
||||
offset += int64(length) + 8
|
||||
c.starts = append(c.starts, offset)
|
||||
// Check for corruption.
|
||||
block := c.readBlock(c.nextBlock)
|
||||
if block == nil {
|
||||
Log.Warning("error reading block")
|
||||
c.recoverFromCorruption(c.nextBlock)
|
||||
break
|
||||
}
|
||||
c.nextBlock++
|
||||
}
|
||||
c.setDbFiles(c.nextBlock)
|
||||
Log.Info("Found ", c.nextBlock-c.firstBlock, " blocks in cache")
|
||||
return c
|
||||
}
|
||||
|
||||
func dbFileNames(dbPath string, chainName string) (string, string) {
|
||||
return filepath.Join(dbPath, chainName, "lengths"),
|
||||
filepath.Join(dbPath, chainName, "blocks")
|
||||
}
|
||||
|
||||
// Add adds the given block to the cache at the given height, returning true
|
||||
// if a reorg was detected.
|
||||
func (c *BlockCache) Add(height int, block *walletrpc.CompactBlock) (bool, error) {
|
||||
func (c *BlockCache) Add(height int, block *walletrpc.CompactBlock) error {
|
||||
// Invariant: m[firstBlock..nextBlock) are valid.
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
if height > c.nextBlock {
|
||||
// restarting the cache (never happens currently), or first time
|
||||
for i := c.firstBlock; i < c.nextBlock; i++ {
|
||||
delete(c.m, i)
|
||||
}
|
||||
c.firstBlock = height
|
||||
c.nextBlock = height
|
||||
// Cache has been reset (for example, checksum error)
|
||||
return nil
|
||||
}
|
||||
// Invariant: m[firstBlock..nextBlock) are valid.
|
||||
if height < c.firstBlock {
|
||||
// Should never try to add a block before Sapling activation height
|
||||
Log.Fatal("cache.Add height below Sapling: ", height)
|
||||
return nil
|
||||
}
|
||||
if height < c.nextBlock {
|
||||
// Should never try to "backup" (call Reorg() instead).
|
||||
Log.Fatal("cache.Add height going backwards: ", height)
|
||||
return nil
|
||||
}
|
||||
bheight := int(block.Height)
|
||||
|
||||
// If we already have this block, a reorg must have occurred;
|
||||
// this block (and all higher) must be re-added.
|
||||
h := height
|
||||
if h < c.firstBlock {
|
||||
h = c.firstBlock
|
||||
}
|
||||
for i := h; i < c.nextBlock; i++ {
|
||||
delete(c.m, i)
|
||||
}
|
||||
c.nextBlock = height
|
||||
if c.firstBlock > c.nextBlock {
|
||||
c.firstBlock = c.nextBlock
|
||||
}
|
||||
// Invariant: m[firstBlock..nextBlock) are valid.
|
||||
|
||||
// Detect reorg, ingestor needs to handle it
|
||||
if height > c.firstBlock && !bytes.Equal(block.PrevHash, c.m[height-1].hash) {
|
||||
return true, nil
|
||||
// TODO COINBASE-HEIGHT: restore this check after coinbase height is fixed
|
||||
if false && bheight != height {
|
||||
// This could only happen if zcashd returned the wrong
|
||||
// block (not the height we requested).
|
||||
Log.Fatal("cache.Add wrong height: ", bheight, " expecting: ", height)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the entry and update the counters
|
||||
// Add the new block and its length to the db files.
|
||||
data, err := proto.Marshal(block)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
c.m[height] = &blockCacheEntry{
|
||||
data: data,
|
||||
hash: block.GetHash(),
|
||||
_, err = c.blocksFile.Write(append(checksum(height, data), data...))
|
||||
if err != nil {
|
||||
Log.Fatal("blocks write failed: ", err)
|
||||
}
|
||||
b := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(b, uint32(len(data)))
|
||||
_, err = c.lengthsFile.Write(b)
|
||||
if err != nil {
|
||||
Log.Fatal("lengths write failed: ", err)
|
||||
}
|
||||
|
||||
// update the in-memory variables
|
||||
offset := c.starts[len(c.starts)-1]
|
||||
c.starts = append(c.starts, offset+int64(len(data)+8))
|
||||
|
||||
if c.latestHash == nil {
|
||||
c.latestHash = make([]byte, len(block.Hash))
|
||||
}
|
||||
copy(c.latestHash, block.Hash)
|
||||
c.nextBlock++
|
||||
// Invariant: m[firstBlock..nextBlock) are valid.
|
||||
|
||||
// remove any blocks that are older than the capacity of the cache
|
||||
for c.firstBlock < c.nextBlock-c.MaxEntries {
|
||||
// Invariant: m[firstBlock..nextBlock) are valid.
|
||||
delete(c.m, c.firstBlock)
|
||||
c.firstBlock++
|
||||
}
|
||||
// Invariant: m[firstBlock..nextBlock) are valid.
|
||||
|
||||
return false, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the compact block at the requested height if it is
|
||||
// Reorg resets nextBlock (the block that should be Add()ed next)
|
||||
// downward to the given height.
|
||||
func (c *BlockCache) Reorg(height int) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
// Allow the caller not to have to worry about Sapling start height.
|
||||
if height < c.firstBlock {
|
||||
height = c.firstBlock
|
||||
}
|
||||
if height >= c.nextBlock {
|
||||
// Timing window, ignore this request
|
||||
return
|
||||
}
|
||||
// Remove the end of the cache.
|
||||
c.nextBlock = height
|
||||
newCacheLen := height - c.firstBlock
|
||||
c.starts = c.starts[:newCacheLen+1]
|
||||
|
||||
if err := c.lengthsFile.Truncate(int64(4 * newCacheLen)); err != nil {
|
||||
Log.Fatal("truncate failed: ", err)
|
||||
}
|
||||
if err := c.blocksFile.Truncate(c.starts[newCacheLen]); err != nil {
|
||||
Log.Fatal("truncate failed: ", err)
|
||||
}
|
||||
c.setLatestHash()
|
||||
}
|
||||
|
||||
// Get returns the compact block at the requested height if it's
|
||||
// in the cache, else nil.
|
||||
func (c *BlockCache) Get(height int) *walletrpc.CompactBlock {
|
||||
c.mutex.RLock()
|
||||
|
@ -105,18 +296,15 @@ func (c *BlockCache) Get(height int) *walletrpc.CompactBlock {
|
|||
if height < c.firstBlock || height >= c.nextBlock {
|
||||
return nil
|
||||
}
|
||||
|
||||
serialized := &walletrpc.CompactBlock{}
|
||||
err := proto.Unmarshal(c.m[height].data, serialized)
|
||||
if err != nil {
|
||||
println("Error unmarshalling compact block")
|
||||
block := c.readBlock(height)
|
||||
if block == nil {
|
||||
c.recoverFromCorruption(height)
|
||||
return nil
|
||||
}
|
||||
|
||||
return serialized
|
||||
return block
|
||||
}
|
||||
|
||||
// GetLatestHeight returns the block with the greatest height, or nil
|
||||
// GetLatestHeight returns the height of the most recent block, or -1
|
||||
// if the cache is empty.
|
||||
func (c *BlockCache) GetLatestHeight() int {
|
||||
c.mutex.RLock()
|
||||
|
@ -126,3 +314,21 @@ func (c *BlockCache) GetLatestHeight() int {
|
|||
}
|
||||
return c.nextBlock - 1
|
||||
}
|
||||
|
||||
func (c *BlockCache) Sync() {
|
||||
c.lengthsFile.Sync()
|
||||
c.blocksFile.Sync()
|
||||
}
|
||||
|
||||
// Currently used only for testing.
|
||||
func (c *BlockCache) Close() {
|
||||
// Some operating systems require you to close files before you can remove them.
|
||||
if c.lengthsFile != nil {
|
||||
c.lengthsFile.Close()
|
||||
c.lengthsFile = nil
|
||||
}
|
||||
if c.blocksFile != nil {
|
||||
c.blocksFile.Close()
|
||||
c.blocksFile = nil
|
||||
}
|
||||
}
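
Taken together with the `cmd/root.go` changes above, the new API is driven roughly as follows. This is a compile-only sketch, not code from the commit; it assumes `common.Log` has already been initialized (as `cmd/root.go` does) and that `chainName`/`saplingHeight` come from `common.GetSaplingInfo()`.

```go
package example

import (
	"log"
	"os"
	"path/filepath"

	"github.com/zcash/lightwalletd/common"
)

// openCacheAndIngest mirrors startServer() after this commit: create
// <data-dir>/db, open (or create) the per-chain cache files, then start the
// ingestor polling zcashd from wherever the cache left off.
func openCacheAndIngest(dataDir, chainName string, saplingHeight int, redownload bool) *common.BlockCache {
	dbPath := filepath.Join(dataDir, "db")
	if err := os.MkdirAll(dbPath, 0755); err != nil {
		log.Fatalf("can't create db directory %s: %v", dbPath, err)
	}
	cache := common.NewBlockCache(dbPath, chainName, saplingHeight, redownload)
	go common.BlockIngestor(cache, 0 /*loop forever*/)
	return cache
}
```

Individual blocks are then served straight from the files via `cache.Get(height)`, and the ingestor calls `cache.Reorg()` and `cache.Add()` as the chain advances.
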
common/cache_test.go
@ -7,12 +7,21 @@ import (
|
|||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/zcash/lightwalletd/parser"
|
||||
"github.com/zcash/lightwalletd/walletrpc"
|
||||
)
|
||||
|
||||
var compacts []*walletrpc.CompactBlock
|
||||
var cache *BlockCache
|
||||
|
||||
const (
|
||||
unitTestPath = "unittestcache"
|
||||
unitTestChain = "unittestnet"
|
||||
)
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
type compactTest struct {
|
||||
BlockHeight int `json:"block"`
|
||||
|
@ -22,7 +31,6 @@ func TestCache(t *testing.T) {
|
|||
Compact string `json:"compact"`
|
||||
}
|
||||
var compactTests []compactTest
|
||||
var compacts []*walletrpc.CompactBlock
|
||||
|
||||
blockJSON, err := ioutil.ReadFile("../testdata/compact_blocks.json")
|
||||
if err != nil {
|
||||
|
@ -33,9 +41,8 @@ func TestCache(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cache := NewBlockCache(4)
|
||||
|
||||
// derive compact blocks from file data (setup, not part of the test)
|
||||
// Derive compact blocks from file data (setup, not part of the test).
|
||||
for _, test := range compactTests {
|
||||
blockData, _ := hex.DecodeString(test.Full)
|
||||
block := parser.NewBlock()
|
||||
|
@ -46,212 +53,129 @@ func TestCache(t *testing.T) {
|
|||
compacts = append(compacts, block.ToCompact())
|
||||
}
|
||||
|
||||
// initially empty cache
|
||||
// Pretend Sapling starts at 289460.
|
||||
os.RemoveAll(unitTestPath)
|
||||
cache = NewBlockCache(unitTestPath, unitTestChain, 289460, true)
|
||||
|
||||
// Initially cache is empty.
|
||||
if cache.GetLatestHeight() != -1 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
|
||||
// Test handling an invalid block (nil will do)
|
||||
reorg, err := cache.Add(21, nil)
|
||||
if err == nil {
|
||||
t.Error("expected error:", err)
|
||||
if cache.firstBlock != 289460 {
|
||||
t.Fatal("unexpected initial firstBlock")
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
if cache.nextBlock != 289460 {
|
||||
t.Fatal("unexpected initial nextBlock")
|
||||
}
|
||||
fillCache(t)
|
||||
reorgCache(t)
|
||||
fillCache(t)
|
||||
|
||||
// Simulate a restart to ensure the db files are read correctly.
|
||||
cache = NewBlockCache(unitTestPath, unitTestChain, 289460, false)
|
||||
|
||||
// Should still be 6 blocks.
|
||||
if cache.nextBlock != 289466 {
|
||||
t.Fatal("unexpected nextBlock height")
|
||||
}
|
||||
reorgCache(t)
|
||||
|
||||
// Reorg to before the first block moves back to only the first block
|
||||
cache.Reorg(289459)
|
||||
if cache.latestHash != nil {
|
||||
t.Fatal("unexpected latestHash, should be nil")
|
||||
}
|
||||
if cache.nextBlock != 289460 {
|
||||
t.Fatal("unexpected nextBlock: ", cache.nextBlock)
|
||||
}
|
||||
|
||||
// normal, sunny-day case, 6 blocks, add as blocks 10-15
|
||||
// Clean up the test files.
|
||||
cache.Close()
|
||||
os.RemoveAll(unitTestPath)
|
||||
}
|
||||
|
||||
func reorgCache(t *testing.T) {
|
||||
// Simulate a reorg by adding a block whose height is lower than the latest;
|
||||
// we're replacing the second block, so there should be only two blocks.
|
||||
cache.Reorg(289461)
|
||||
err := cache.Add(289461, compacts[1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cache.firstBlock != 289460 {
|
||||
t.Fatal("unexpected firstBlock height")
|
||||
}
|
||||
if cache.nextBlock != 289462 {
|
||||
t.Fatal("unexpected nextBlock height")
|
||||
}
|
||||
if len(cache.starts) != 3 {
|
||||
t.Fatal("unexpected len(cache.starts)")
|
||||
}
|
||||
|
||||
// some "black-box" tests (using exported interfaces)
|
||||
if cache.GetLatestHeight() != 289461 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(289461).Height) != 289461 {
|
||||
t.Fatal("unexpected block contents")
|
||||
}
|
||||
|
||||
// Make sure we can go forward from here
|
||||
err = cache.Add(289462, compacts[2])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cache.firstBlock != 289460 {
|
||||
t.Fatal("unexpected firstBlock height")
|
||||
}
|
||||
if cache.nextBlock != 289463 {
|
||||
t.Fatal("unexpected nextBlock height")
|
||||
}
|
||||
if len(cache.starts) != 4 {
|
||||
t.Fatal("unexpected len(cache.starts)")
|
||||
}
|
||||
|
||||
if cache.GetLatestHeight() != 289462 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(289462).Height) != 289462 {
|
||||
t.Fatal("unexpected block contents")
|
||||
}
|
||||
}
|
||||
|
||||
// Whatever the state of the cache, add 6 blocks starting at the
|
||||
// pretend Sapling height, 289460 (this could cause a reorg).
|
||||
func fillCache(t *testing.T) {
|
||||
next := 289460
|
||||
cache.Reorg(next)
|
||||
for i, compact := range compacts {
|
||||
reorg, err = cache.Add(10+i, compact)
|
||||
err := cache.Add(next, compact)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
next++
|
||||
|
||||
// some "white-box" checks
|
||||
if cache.firstBlock != 289460 {
|
||||
t.Fatal("unexpected firstBlock height")
|
||||
}
|
||||
if cache.GetLatestHeight() != 10+i {
|
||||
if cache.nextBlock != 289460+i+1 {
|
||||
t.Fatal("unexpected nextBlock height")
|
||||
}
|
||||
if len(cache.starts) != i+2 {
|
||||
t.Fatal("unexpected len(cache.starts)")
|
||||
}
|
||||
|
||||
// some "black-box" tests (using exported interfaces)
|
||||
if cache.GetLatestHeight() != 289460+i {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
// The test blocks start at height 289460
|
||||
if int(cache.Get(10+i).Height) != 289460+i {
|
||||
b := cache.Get(289460 + i)
|
||||
if b == nil {
|
||||
t.Fatal("unexpected Get failure")
|
||||
}
|
||||
if int(b.Height) != 289460+i {
|
||||
t.Fatal("unexpected block contents")
|
||||
}
|
||||
}
|
||||
if len(cache.m) != 4 { // max entries is 4
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
if cache.firstBlock != 16-4 {
|
||||
t.Fatal("unexpected firstBlock")
|
||||
}
|
||||
if cache.nextBlock != 16 {
|
||||
t.Fatal("unexpected nextBlock")
|
||||
}
|
||||
|
||||
// No entries just before and just after the cache range
|
||||
if cache.Get(11) != nil || cache.Get(16) != nil {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
|
||||
// We can re-add the last block (with the same data) and
|
||||
// that should just replace and not be considered a reorg
|
||||
reorg, err = cache.Add(15, compacts[5])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
}
|
||||
if len(cache.m) != 4 {
|
||||
t.Fatal("unexpected number of blocks")
|
||||
}
|
||||
if cache.firstBlock != 16-4 {
|
||||
t.Fatal("unexpected firstBlock")
|
||||
}
|
||||
if cache.nextBlock != 16 {
|
||||
t.Fatal("unexpected nextBlock")
|
||||
}
|
||||
|
||||
// Simulate a reorg by resubmitting as the next block, 16, any block with
|
||||
// the wrong prev-hash (let's use the first, just because it's handy)
|
||||
reorg, err = cache.Add(16, compacts[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reorg {
|
||||
t.Fatal("unexpected non-reorg")
|
||||
}
|
||||
// The cache shouldn't have changed in any way
|
||||
if cache.Get(16) != nil {
|
||||
t.Fatal("unexpected block 16 exists")
|
||||
}
|
||||
if cache.GetLatestHeight() != 15 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(15).Height) != 289460+5 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
if len(cache.m) != 4 {
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
|
||||
// In response to the reorg being detected, we must back up until we
|
||||
// reach a block that's before the reorg (where the chain split).
|
||||
// Let's back up one block, to height 15, request it from zcashd,
|
||||
// but let's say this block is from the new branch, so we haven't
|
||||
// gone back far enough, so this will still be disallowed.
|
||||
reorg, err = cache.Add(15, compacts[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reorg {
|
||||
t.Fatal("unexpected non-reorg")
|
||||
}
|
||||
// the cache deleted block 15 (it's definitely wrong)
|
||||
if cache.Get(15) != nil {
|
||||
t.Fatal("unexpected block 15 exists")
|
||||
}
|
||||
if cache.GetLatestHeight() != 14 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(14).Height) != 289460+4 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
// now only 3 entries (12-14)
|
||||
if len(cache.m) != 3 {
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
|
||||
// Back up a couple more, try to re-add height 13, and suppose
|
||||
// that's before the split (for example, there were two 14s).
|
||||
// (In this test, we're replacing 13 with the same block; in
|
||||
// real life, we'd be replacing it with a different version of
|
||||
// 13 that has the same prev-hash).
|
||||
reorg, err = cache.Add(13, compacts[3])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
}
|
||||
// 13 was replaced (with the same block), but that means
|
||||
// everything after 13 is deleted
|
||||
if cache.Get(14) != nil {
|
||||
t.Fatal("unexpected block 14 exists")
|
||||
}
|
||||
if cache.GetLatestHeight() != 13 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(13).Height) != 289460+3 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
if int(cache.Get(12).Height) != 289460+2 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
// down to 2 entries (12-13)
|
||||
if len(cache.m) != 2 {
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
|
||||
// Now we can continue forward from here
|
||||
reorg, err = cache.Add(14, compacts[4])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
}
|
||||
if cache.GetLatestHeight() != 14 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(14).Height) != 289460+4 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
if len(cache.m) != 3 {
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
|
||||
// It's possible, although unlikely, that after a reorg is detected,
|
||||
// we back up so much that we're before the start of the cache
|
||||
// (especially if the cache is very small). This should remove the
|
||||
// entire cache before adding the new entry.
|
||||
if cache.firstBlock != 12 {
|
||||
t.Fatal("unexpected firstBlock")
|
||||
}
|
||||
reorg, err = cache.Add(10, compacts[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
}
|
||||
if cache.GetLatestHeight() != 10 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(10).Height) != 289460+0 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
if len(cache.m) != 1 {
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
|
||||
// Another weird case (not currently possible) is adding a block at
|
||||
// a height that is not one higher than the current latest block.
|
||||
// This should remove the entire cache before adding the new entry.
|
||||
reorg, err = cache.Add(20, compacts[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
}
|
||||
if cache.GetLatestHeight() != 20 {
|
||||
t.Fatal("unexpected GetLatestHeight")
|
||||
}
|
||||
if int(cache.Get(20).Height) != 289460 {
|
||||
t.Fatal("unexpected Get")
|
||||
}
|
||||
if len(cache.m) != 1 {
|
||||
t.Fatal("unexpected number of cache entries")
|
||||
}
|
||||
// the cache deleted block 15 (it's definitely wrong)
|
||||
}
|
||||
|
|
common/common.go (113)
|
@ -30,7 +30,8 @@ type Options struct {
|
|||
LogFile string `json:"log_file,omitempty"`
|
||||
ZcashConfPath string `json:"zcash_conf,omitempty"`
|
||||
NoTLSVeryInsecure bool `json:"no_tls_very_insecure,omitempty"`
|
||||
CacheSize int `json:"cache_size,omitempty"`
|
||||
Redownload bool `json:"redownload"`
|
||||
DataDir string `json:"data-dir"`
|
||||
}
|
||||
|
||||
// RawRequest points to the function to send an RPC request to zcashd;
|
||||
|
@ -125,66 +126,102 @@ func getBlockFromRPC(height int) (*walletrpc.CompactBlock, error) {
|
|||
if len(rest) != 0 {
|
||||
return nil, errors.New("received overlong message")
|
||||
}
|
||||
// TODO COINBASE-HEIGHT: restore this check after coinbase height is fixed
|
||||
if false && block.GetHeight() != height {
|
||||
return nil, errors.New("received unexpected height block")
|
||||
}
|
||||
|
||||
return block.ToCompact(), nil
|
||||
}
|
||||
|
||||
// BlockIngestor runs as a goroutine and polls zcashd for new blocks, adding them
|
||||
// to the cache. The repetition count, rep, is nonzero only for unit-testing.
|
||||
func BlockIngestor(cache *BlockCache, startHeight int, rep int) {
|
||||
func BlockIngestor(c *BlockCache, rep int) {
|
||||
lastLog := time.Now()
|
||||
reorgCount := 0
|
||||
height := startHeight
|
||||
lastHeightReported := 0
|
||||
retryCount := 0
|
||||
wait := true
|
||||
logWaiting := false
|
||||
|
||||
// Start listening for new blocks
|
||||
retryCount := 0
|
||||
for i := 0; rep == 0 || i < rep; i++ {
|
||||
height := c.GetNextHeight()
|
||||
block, err := getBlockFromRPC(height)
|
||||
if block == nil || err != nil {
|
||||
if err != nil {
|
||||
if err != nil {
|
||||
Log.WithFields(logrus.Fields{
|
||||
"height": height,
|
||||
"error": err,
|
||||
}).Warn("error zcashd getblock rpc")
|
||||
retryCount++
|
||||
if retryCount > 10 {
|
||||
Log.WithFields(logrus.Fields{
|
||||
"height": height,
|
||||
"error": err,
|
||||
}).Warn("error with getblock rpc")
|
||||
retryCount++
|
||||
if retryCount > 10 {
|
||||
Log.WithFields(logrus.Fields{
|
||||
"timeouts": retryCount,
|
||||
}).Fatal("unable to issue RPC call to zcashd node")
|
||||
}
|
||||
"timeouts": retryCount,
|
||||
}).Fatal("unable to issue RPC call to zcashd node")
|
||||
}
|
||||
// We're up to date in our polling; wait for a new block
|
||||
// Delay then retry the same height.
|
||||
c.Sync()
|
||||
Sleep(10 * time.Second)
|
||||
wait = true
|
||||
continue
|
||||
}
|
||||
retryCount = 0
|
||||
|
||||
if (height % 100) == 0 {
|
||||
Log.Info("Ingestor adding block to cache: ", height)
|
||||
if block == nil {
|
||||
// No block at this height.
|
||||
if wait {
|
||||
// Wait a bit then retry the same height.
|
||||
c.Sync()
|
||||
if !logWaiting {
|
||||
logWaiting = true
|
||||
Log.Info("Ingestor waiting for block: ", height)
|
||||
}
|
||||
Sleep(10 * time.Second)
|
||||
wait = false
|
||||
continue
|
||||
}
|
||||
}
|
||||
reorg, err := cache.Add(height, block)
|
||||
if err != nil {
|
||||
Log.Fatal("Cache add failed")
|
||||
}
|
||||
|
||||
// Check for reorgs once we have the initial block hash from startup
|
||||
if reorg {
|
||||
// This must back up at least 1, but it's arbitrary, any value
|
||||
// will work; this is probably a good balance.
|
||||
height -= 2
|
||||
reorgCount++
|
||||
if reorgCount > 10 {
|
||||
if block == nil || c.HashMismatch(block.PrevHash) {
|
||||
// This may not be a reorg; it may be we're at the tip
|
||||
// and there's no new block yet, but we want to back up
|
||||
// so we detect a reorg in which the new chain is the
|
||||
// same length or shorter.
|
||||
reorgCount += 1
|
||||
if reorgCount > 100 {
|
||||
Log.Fatal("Reorg exceeded max of 100 blocks! Help!")
|
||||
}
|
||||
Log.WithFields(logrus.Fields{
|
||||
"height": height,
|
||||
"hash": displayHash(block.Hash),
|
||||
"phash": displayHash(block.PrevHash),
|
||||
"reorg": reorgCount,
|
||||
}).Warn("REORG")
|
||||
// Print the hash of the block that is getting reorg-ed away
|
||||
// as 'phash', not the prevhash of the block we just received.
|
||||
if block != nil {
|
||||
Log.WithFields(logrus.Fields{
|
||||
"height": height,
|
||||
"hash": displayHash(block.Hash),
|
||||
"phash": displayHash(c.GetLatestHash()),
|
||||
"reorg": reorgCount,
|
||||
}).Warn("REORG")
|
||||
} else if reorgCount > 1 {
|
||||
Log.WithFields(logrus.Fields{
|
||||
"height": height,
|
||||
"phash": displayHash(c.GetLatestHash()),
|
||||
"reorg": reorgCount,
|
||||
}).Warn("REORG")
|
||||
}
|
||||
// Try backing up
|
||||
c.Reorg(height - 1)
|
||||
Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
// We have a valid block to add.
|
||||
wait = true
|
||||
reorgCount = 0
|
||||
height++
|
||||
if err := c.Add(height, block); err != nil {
|
||||
Log.Fatal("Cache add failed:", err)
|
||||
}
|
||||
// Don't log these too often.
|
||||
if time.Now().Sub(lastLog).Seconds() >= 4 && c.GetNextHeight() == height+1 && height != lastHeightReported {
|
||||
lastLog = time.Now()
|
||||
lastHeightReported = height
|
||||
Log.Info("Ingestor adding block to cache: ", height)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
common/common_test.go
@ -160,64 +160,102 @@ func getblockStub(method string, params []json.RawMessage) (json.RawMessage, err
|
|||
step++
|
||||
switch step {
|
||||
case 1:
|
||||
if height != "20" {
|
||||
if height != "380640" {
|
||||
testT.Error("unexpected height")
|
||||
}
|
||||
// Sunny-day
|
||||
return blocks[0], nil
|
||||
case 2:
|
||||
if height != "21" {
|
||||
if height != "380641" {
|
||||
testT.Error("unexpected height")
|
||||
}
|
||||
// Sunny-day
|
||||
return blocks[1], nil
|
||||
case 3:
|
||||
if height != "22" {
|
||||
testT.Error("unexpected height")
|
||||
if height != "380642" {
|
||||
testT.Error("unexpected height", height)
|
||||
}
|
||||
// This should cause one sleep (then retry)
|
||||
// Simulate that we're synced (caught up);
|
||||
// this should cause one 10s sleep (then retry).
|
||||
return nil, errors.New("-8: Block height out of range")
|
||||
case 4:
|
||||
if sleepCount != 1 || sleepDuration != 10*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
// should re-request the same height
|
||||
if height != "22" {
|
||||
testT.Error("unexpected height")
|
||||
if height != "380642" {
|
||||
testT.Error("unexpected height", height)
|
||||
}
|
||||
// Back to sunny-day
|
||||
return blocks[2], nil
|
||||
// Simulate that we're still caught up; this should cause a 1s
|
||||
// wait then a check for reorg to shorter chain (back up one).
|
||||
return nil, errors.New("-8: Block height out of range")
|
||||
case 5:
|
||||
if height != "23" {
|
||||
testT.Error("unexpected height")
|
||||
}
|
||||
// Simulate a reorg (it doesn't matter which block we return here, as
|
||||
// long as its prevhash doesn't match the latest block's hash)
|
||||
return blocks[2], nil
|
||||
case 6:
|
||||
// When a reorg occurs, the ingestor backs up 2 blocks
|
||||
if height != "21" { // 23 - 2
|
||||
testT.Error("unexpected height")
|
||||
}
|
||||
return blocks[1], nil
|
||||
case 7:
|
||||
if height != "22" {
|
||||
testT.Error("unexpected height")
|
||||
}
|
||||
// Should fail to Unmarshal the block, sleep, retry
|
||||
return nil, nil
|
||||
case 8:
|
||||
if sleepCount != 2 || sleepDuration != 20*time.Second {
|
||||
if sleepCount != 2 || sleepDuration != 11*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "22" {
|
||||
testT.Error("unexpected height")
|
||||
// should re-request the same height
|
||||
if height != "380641" {
|
||||
testT.Error("unexpected height", height)
|
||||
}
|
||||
// Return the expected block (as normally happens, no actual reorg),
|
||||
// so the ingestor will immediately re-request the next block (42).
|
||||
return blocks[1], nil
|
||||
case 6:
|
||||
if sleepCount != 2 || sleepDuration != 11*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "380642" {
|
||||
testT.Error("unexpected height", height)
|
||||
}
|
||||
// Block 42 has now finally appeared; it will immediately ask for 43.
|
||||
return blocks[2], nil
|
||||
case 7:
|
||||
if sleepCount != 2 || sleepDuration != 11*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "380643" {
|
||||
testT.Error("unexpected height", height)
|
||||
}
|
||||
// Simulate a reorg by modifying the block's hash temporarily,
|
||||
// which causes a 1s sleep and then backs up one block (to 42).
|
||||
blocks[3][9]++ // first byte of the prevhash
|
||||
return blocks[3], nil
|
||||
case 8:
|
||||
blocks[3][9]-- // repair first byte of the prevhash
|
||||
if sleepCount != 3 || sleepDuration != 12*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "380642" {
|
||||
testT.Error("unexpected height ", height)
|
||||
}
|
||||
return blocks[2], nil
|
||||
case 9:
|
||||
if sleepCount != 3 || sleepDuration != 12*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "380643" {
|
||||
testT.Error("unexpected height ", height)
|
||||
}
|
||||
// Instead of returning expected (43), simulate block unmarshal
|
||||
// failure; this should cause a 10s sleep, then a retry
|
||||
return nil, nil
|
||||
case 10:
|
||||
if sleepCount != 4 || sleepDuration != 22*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "380643" {
|
||||
testT.Error("unexpected height ", height)
|
||||
}
|
||||
// Back to sunny-day
|
||||
return blocks[2], nil
|
||||
}
|
||||
if height != "23" {
|
||||
testT.Error("unexpected height")
|
||||
return blocks[3], nil
|
||||
case 11:
|
||||
if sleepCount != 4 || sleepDuration != 22*time.Second {
|
||||
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
|
||||
}
|
||||
if height != "380644" {
|
||||
testT.Error("unexpected height ", height)
|
||||
}
|
||||
// next block not ready
|
||||
return nil, nil
|
||||
}
|
||||
testT.Error("getblockStub called too many times")
|
||||
return nil, nil
|
||||
|
@ -227,25 +265,28 @@ func TestBlockIngestor(t *testing.T) {
|
|||
testT = t
|
||||
RawRequest = getblockStub
|
||||
Sleep = sleepStub
|
||||
testcache := NewBlockCache(4)
|
||||
BlockIngestor(testcache, 20, 7)
|
||||
if step != 7 {
|
||||
os.RemoveAll(unitTestPath)
|
||||
testcache := NewBlockCache(unitTestPath, unitTestChain, 380640, false)
|
||||
BlockIngestor(testcache, 11)
|
||||
if step != 11 {
|
||||
t.Error("unexpected final step", step)
|
||||
}
|
||||
step = 0
|
||||
sleepCount = 0
|
||||
sleepDuration = 0
|
||||
os.RemoveAll(unitTestPath)
|
||||
}
|
||||
|
||||
func TestGetBlockRange(t *testing.T) {
|
||||
testT = t
|
||||
RawRequest = getblockStub
|
||||
testcache := NewBlockCache(4)
|
||||
os.RemoveAll(unitTestPath)
|
||||
testcache := NewBlockCache(unitTestPath, unitTestChain, 380640, true)
|
||||
blockChan := make(chan walletrpc.CompactBlock)
|
||||
errChan := make(chan error)
|
||||
go GetBlockRange(testcache, blockChan, errChan, 20, 22)
|
||||
go GetBlockRange(testcache, blockChan, errChan, 380640, 380642)
|
||||
|
||||
// read in block 20
|
||||
// read in block 380640
|
||||
select {
|
||||
case err := <-errChan:
|
||||
// this will also catch context.DeadlineExceeded from the timeout
|
||||
|
@ -256,7 +297,7 @@ func TestGetBlockRange(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// read in block 21
|
||||
// read in block 380641
|
||||
select {
|
||||
case err := <-errChan:
|
||||
// this will also catch context.DeadlineExceeded from the timeout
|
||||
|
@ -267,7 +308,7 @@ func TestGetBlockRange(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// try to read in block 22, but this will fail (see case 3 above)
|
||||
// try to read in block 380642, but this will fail (see case 3 above)
|
||||
select {
|
||||
case err := <-errChan:
|
||||
// this will also catch context.DeadlineExceeded from the timeout
|
||||
|
@ -284,6 +325,7 @@ func TestGetBlockRange(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal("unexpected err return")
|
||||
}
|
||||
os.RemoveAll(unitTestPath)
|
||||
}
|
||||
|
||||
func TestGenerateCerts(t *testing.T) {
|
||||
|
|
|
@ -26,12 +26,26 @@ var (
|
|||
logger = logrus.New()
|
||||
step int
|
||||
|
||||
cache *common.BlockCache
|
||||
lwd walletrpc.CompactTxStreamerServer
|
||||
blocks [][]byte // four test blocks
|
||||
rawTxData [][]byte
|
||||
)
|
||||
|
||||
const (
|
||||
unitTestPath = "unittestcache"
|
||||
unitTestChain = "unittestnet"
|
||||
)
|
||||
|
||||
func testsetup() (walletrpc.CompactTxStreamerServer, *common.BlockCache) {
|
||||
os.RemoveAll(unitTestPath)
|
||||
cache := common.NewBlockCache(unitTestPath, unitTestChain, 380640, true)
|
||||
lwd, err := NewLwdStreamer(cache)
|
||||
if err != nil {
|
||||
os.Stderr.WriteString(fmt.Sprint("NewLwdStreamer failed:", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
return lwd, cache
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
output, err := os.OpenFile("test-log", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
|
@ -43,13 +57,6 @@ func TestMain(m *testing.M) {
|
|||
"app": "test",
|
||||
})
|
||||
|
||||
cache = common.NewBlockCache(4)
|
||||
lwd, err = NewLwdStreamer(cache)
|
||||
if err != nil {
|
||||
os.Stderr.WriteString(fmt.Sprint("NewLwdStreamer failed:", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Several tests need test blocks; read all 4 into memory just once
|
||||
// (for efficiency).
|
||||
testBlocks, err := os.Open("../testdata/blocks")
|
||||
|
@ -57,6 +64,7 @@ func TestMain(m *testing.M) {
|
|||
os.Stderr.WriteString(fmt.Sprint("Error:", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
defer testBlocks.Close()
|
||||
scan := bufio.NewScanner(testBlocks)
|
||||
for scan.Scan() { // each line (block)
|
||||
block := scan.Bytes()
|
||||
|
@ -97,12 +105,15 @@ func TestMain(m *testing.M) {
|
|||
|
||||
// cleanup
|
||||
os.Remove("test-log")
|
||||
os.RemoveAll(unitTestPath)
|
||||
|
||||
os.Exit(exitcode)
|
||||
}
|
||||
|
||||
func TestGetTransaction(t *testing.T) {
|
||||
// GetTransaction() will mostly be tested below via TestGetAddressTxids
|
||||
lwd, _ := testsetup()
|
||||
|
||||
rawtx, err := lwd.GetTransaction(context.Background(),
|
||||
&walletrpc.TxFilter{})
|
||||
if err == nil {
|
||||
|
@ -135,7 +146,7 @@ func getblockStub(method string, params []json.RawMessage) (json.RawMessage, err
|
|||
if err != nil {
|
||||
testT.Fatal("could not unmarshal height")
|
||||
}
|
||||
if height != "1234" {
|
||||
if height != "380640" {
|
||||
testT.Fatal("unexpected getblock height", height)
|
||||
}
|
||||
|
||||
|
@ -153,6 +164,8 @@ func getblockStub(method string, params []json.RawMessage) (json.RawMessage, err
|
|||
func TestGetLatestBlock(t *testing.T) {
|
||||
testT = t
|
||||
common.RawRequest = getblockStub
|
||||
lwd, cache := testsetup()
|
||||
|
||||
// This argument is not used (it may be in the future)
|
||||
req := &walletrpc.ChainSpec{}
|
||||
|
||||
|
@ -168,22 +181,18 @@ func TestGetLatestBlock(t *testing.T) {
|
|||
}
|
||||
|
||||
// This does zcashd rpc "getblock", calls getblockStub() above
|
||||
block, err := common.GetBlock(cache, 1234)
|
||||
block, err := common.GetBlock(cache, 380640)
|
||||
if err != nil {
|
||||
t.Fatal("getBlockFromRPC failed", err)
|
||||
}
|
||||
reorg, err := cache.Add(40, block)
|
||||
if reorg {
|
||||
t.Fatal("unexpected reorg")
|
||||
}
|
||||
if err != nil {
|
||||
if err = cache.Add(380640, block); err != nil {
|
||||
t.Fatal("cache.Add failed:", err)
|
||||
}
|
||||
blockID, err = lwd.GetLatestBlock(context.Background(), req)
|
||||
if err != nil {
|
||||
t.Fatal("lwd.GetLatestBlock failed", err)
|
||||
}
|
||||
if blockID.Height != 40 {
|
||||
if blockID.Height != 380640 {
|
||||
t.Fatal("unexpected blockID.height")
|
||||
}
|
||||
step = 0
|
||||
|
@ -265,6 +274,8 @@ func (tg *testgettx) Send(tx *walletrpc.RawTransaction) error {
|
|||
func TestGetAddressTxids(t *testing.T) {
|
||||
testT = t
|
||||
common.RawRequest = zcashdrpcStub
|
||||
lwd, _ := testsetup()
|
||||
|
||||
addressBlockFilter := &walletrpc.TransparentAddressBlockFilter{
|
||||
Range: &walletrpc.BlockRange{
|
||||
Start: &walletrpc.BlockID{Height: 20},
|
||||
|
@ -302,6 +313,8 @@ func TestGetAddressTxids(t *testing.T) {
|
|||
func TestGetBlock(t *testing.T) {
|
||||
testT = t
|
||||
common.RawRequest = getblockStub
|
||||
lwd, _ := testsetup()
|
||||
|
||||
_, err := lwd.GetBlock(context.Background(), &walletrpc.BlockID{})
|
||||
if err == nil {
|
||||
t.Fatal("GetBlock should have failed")
|
||||
|
@ -317,7 +330,9 @@ func TestGetBlock(t *testing.T) {
|
|||
if err.Error() != "GetBlock by Hash is not yet implemented" {
|
||||
t.Fatal("GetBlock hash unimplemented error message failed")
|
||||
}
|
||||
block, err := lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 1234})
|
||||
|
||||
// getblockStub() case 1: return error
|
||||
block, err := lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 380640})
|
||||
if err != nil {
|
||||
t.Fatal("GetBlock failed:", err)
|
||||
}
|
||||
|
@ -325,7 +340,7 @@ func TestGetBlock(t *testing.T) {
|
|||
t.Fatal("GetBlock returned unexpected block:", err)
|
||||
}
|
||||
// getblockStub() case 2: return error
|
||||
block, err = lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 1234})
|
||||
block, err = lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 380640})
|
||||
if err == nil {
|
||||
t.Fatal("GetBlock should have failed")
|
||||
}
|
||||
|
@ -350,9 +365,12 @@ func (tg *testgetbrange) Send(cb *walletrpc.CompactBlock) error {
|
|||
func TestGetBlockRange(t *testing.T) {
|
||||
testT = t
|
||||
common.RawRequest = getblockStub
|
||||
common.RawRequest = getblockStub
|
||||
lwd, _ := testsetup()
|
||||
|
||||
blockrange := &walletrpc.BlockRange{
|
||||
Start: &walletrpc.BlockID{Height: 1234},
|
||||
End: &walletrpc.BlockID{Height: 1234},
|
||||
Start: &walletrpc.BlockID{Height: 380640},
|
||||
End: &walletrpc.BlockID{Height: 380640},
|
||||
}
|
||||
// getblockStub() case 1 (success)
|
||||
err := lwd.GetBlockRange(blockrange, &testgetbrange{})
|
||||
|
@ -376,6 +394,8 @@ func getblockchaininfoStub(method string, params []json.RawMessage) (json.RawMes
|
|||
func TestGetLightdInfo(t *testing.T) {
|
||||
testT = t
|
||||
common.RawRequest = getblockchaininfoStub
|
||||
lwd, _ := testsetup()
|
||||
|
||||
ldinfo, err := lwd.GetLightdInfo(context.Background(), &walletrpc.Empty{})
|
||||
if err != nil {
|
||||
t.Fatal("GetLightdInfo failed", err)
|
||||
|
@ -406,6 +426,7 @@ func sendrawtransactionStub(method string, params []json.RawMessage) (json.RawMe
|
|||
|
||||
func TestSendTransaction(t *testing.T) {
|
||||
testT = t
|
||||
lwd, _ := testsetup()
|
||||
common.RawRequest = sendrawtransactionStub
|
||||
rawtx := walletrpc.RawTransaction{Data: []byte{7}}
|
||||
sendresult, err := lwd.SendTransaction(context.Background(), &rawtx)
|
||||
|
|
go.mod (8)
|
@ -14,11 +14,11 @@ require (
|
|||
github.com/spf13/viper v1.6.2
|
||||
github.com/stretchr/testify v1.4.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c // indirect
|
||||
golang.org/x/net v0.0.0-20200319234117-63522dbf7eec // indirect
|
||||
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
|
||||
golang.org/x/sys v0.0.0-20200408040146-ea54a3c99b9b // indirect
|
||||
golang.org/x/text v0.3.2 // indirect
|
||||
google.golang.org/genproto v0.0.0-20200319113533-08878b785e9c // indirect
|
||||
google.golang.org/grpc v1.28.0
|
||||
google.golang.org/genproto v0.0.0-20200408120641-fbb3ad325eb7 // indirect
|
||||
google.golang.org/grpc v1.28.1
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
|
||||
gopkg.in/ini.v1 v1.51.0
|
||||
)
|
||||
|
|
go.sum (18)
|
@ -187,8 +187,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
|
|||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20200319234117-63522dbf7eec h1:w0SItUiQ4sBiXBAwWNkyu8Fu2Qpn/dtDIcoPkPDqjRw=
|
||||
golang.org/x/net v0.0.0-20200319234117-63522dbf7eec/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
|
@ -205,8 +205,10 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44=
|
||||
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200408040146-ea54a3c99b9b h1:h03Ur1RlPrGTjua4koYdpGl8W0eYo8p1uI9w7RPlkdk=
|
||||
golang.org/x/sys v0.0.0-20200408040146-ea54a3c99b9b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
|
@ -224,16 +226,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200319113533-08878b785e9c h1:5aI3/f/3eCZps9xwoEnmgfDJDhMbnJpfqeGpjVNgVEI=
|
||||
google.golang.org/genproto v0.0.0-20200319113533-08878b785e9c/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200408120641-fbb3ad325eb7 h1:AMRSRXQjlgdwNhezZB0hscb7mJ4AK/UCM6uNIlEknCc=
|
||||
google.golang.org/genproto v0.0.0-20200408120641-fbb3ad325eb7/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k=
|
||||
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
|
@@ -0,0 +1,36 @@
// This program increments a given byte of a given file,
// to test data corruption detection -- BE CAREFUL!
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	if len(os.Args) != 3 {
		fmt.Println("usage:", os.Args[0], "file offset")
		os.Exit(1)
	}
	f, err := os.OpenFile(os.Args[1], os.O_RDWR, 0644)
	if err != nil {
		fmt.Println("open failed:", err)
		os.Exit(1)
	}
	offset, err := strconv.ParseInt(os.Args[2], 10, 64)
	if err != nil {
		fmt.Println("bad offset:", err)
		os.Exit(1)
	}
	// Read the single byte at the requested offset, corrupt it by
	// incrementing it (wrapping at 255), and write it back in place.
	b := make([]byte, 1)
	if n, err := f.ReadAt(b, offset); err != nil || n != 1 {
		fmt.Println("read failed:", n, err)
		os.Exit(1)
	}
	b[0]++
	if n, err := f.WriteAt(b, offset); err != nil || n != 1 {
		fmt.Println("write failed:", n, err)
		os.Exit(1)
	}
}
@@ -89,7 +89,7 @@ constants.

Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you make need to update the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.

### mksyscall.go
@@ -163,7 +163,7 @@ The merge is performed in the following steps:

## Generated files

### `zerror_${GOOS}_${GOARCH}.go`
### `zerrors_${GOOS}_${GOARCH}.go`

A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).
@@ -190,6 +190,12 @@ solaris_amd64)
	mksysnum=
	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
	;;
illumos_amd64)
	mksyscall="go run mksyscall_solaris.go"
	mkerrors=
	mksysnum=
	mktypes=
	;;
*)
	echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
	exit 1
@@ -217,6 +223,11 @@ esac
	echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
	# 1.13 and later, syscalls via libSystem (including syscallPtr)
	echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
elif [ "$GOOS" == "illumos" ]; then
	# illumos code generation requires a --illumos switch
	echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
	# illumos implies solaris, so solaris code generation is also required
	echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
else
	echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
fi
@@ -200,6 +200,7 @@ struct ltchars {
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/genetlink.h>
#include <linux/hdreg.h>
#include <linux/icmpv6.h>
@@ -485,6 +486,7 @@ ccflags="$@"
		$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
		$2 ~ /^MODULE_INIT_/ ||
		$2 !~ "NLA_TYPE_MASK" &&
		$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
		$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
		$2 ~ /^SIOC/ ||
		$2 ~ /^TIOC/ ||
@@ -506,7 +508,8 @@ ccflags="$@"
		$2 ~ /^CAP_/ ||
		$2 ~ /^ALG_/ ||
		$2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ ||
		$2 ~ /^FS_IOC_.*ENCRYPTION/ ||
		$2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ ||
		$2 ~ /^FS_VERITY_/ ||
		$2 ~ /^FSCRYPT_/ ||
		$2 ~ /^GRND_/ ||
		$2 ~ /^RND/ ||
@@ -0,0 +1,57 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// illumos system calls not present on Solaris.

// +build amd64,illumos

package unix

import "unsafe"

func bytes2iovec(bs [][]byte) []Iovec {
	iovecs := make([]Iovec, len(bs))
	for i, b := range bs {
		iovecs[i].SetLen(len(b))
		if len(b) > 0 {
			// somehow Iovec.Base on illumos is (*int8), not (*byte)
			iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0]))
		} else {
			iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero))
		}
	}
	return iovecs
}

//sys readv(fd int, iovs []Iovec) (n int, err error)

func Readv(fd int, iovs [][]byte) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = readv(fd, iovecs)
	return n, err
}

//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error)

func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = preadv(fd, iovecs, off)
	return n, err
}

//sys writev(fd int, iovs []Iovec) (n int, err error)

func Writev(fd int, iovs [][]byte) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = writev(fd, iovecs)
	return n, err
}

//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error)

func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) {
	iovecs := bytes2iovec(iovs)
	n, err = pwritev(fd, iovecs, off)
	return n, err
}
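To make the vectored-I/O wrappers above concrete, here is a minimal usage sketch. It is not part of this change, the file path is made up, and it assumes an illumos build of golang.org/x/sys/unix; it gathers two buffers into a single writev(2) call through the new Writev wrapper:

```go
// +build illumos,amd64

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp/writev-demo.txt", unix.O_CREAT|unix.O_WRONLY|unix.O_TRUNC, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Two separate buffers handed to the kernel in one writev(2) call;
	// Writev converts them to []Iovec via bytes2iovec internally.
	n, err := unix.Writev(fd, [][]byte{[]byte("hello, "), []byte("illumos\n")})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", n)
}
```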
@@ -133,6 +133,12 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
	return openat(dirfd, path, flags|O_LARGEFILE, mode)
}

//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)

func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) {
	return openat2(dirfd, path, how, SizeofOpenHow)
}

//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)

func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
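For context, a minimal, hypothetical sketch of calling the new Openat2 wrapper (not part of this commit; it assumes a Linux 5.6+ kernel and uses the OpenHow type and RESOLVE_BENEATH constant added further down in this diff):

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	dirfd, err := unix.Open("/etc", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(dirfd)

	// With RESOLVE_BENEATH, an absolute path, an absolute symlink, or a ".."
	// component that escapes dirfd fails with EXDEV instead of leaving /etc.
	how := &unix.OpenHow{Flags: unix.O_RDONLY, Resolve: unix.RESOLVE_BENEATH}
	fd, err := unix.Openat2(dirfd, "hosts", how)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	log.Println("opened /etc/hosts as fd", fd)
}
```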
@@ -76,7 +76,7 @@ func SignalName(s syscall.Signal) string {
// The signal name should start with "SIG".
func SignalNum(s string) syscall.Signal {
	signalNameMapOnce.Do(func() {
		signalNameMap = make(map[string]syscall.Signal)
		signalNameMap = make(map[string]syscall.Signal, len(signalList))
		for _, signal := range signalList {
			signalNameMap[signal.name] = signal.num
		}
@ -216,6 +216,7 @@ const (
|
|||
BPF_F_RDONLY = 0x8
|
||||
BPF_F_RDONLY_PROG = 0x80
|
||||
BPF_F_RECOMPUTE_CSUM = 0x1
|
||||
BPF_F_REPLACE = 0x4
|
||||
BPF_F_REUSE_STACKID = 0x400
|
||||
BPF_F_SEQ_NUMBER = 0x8
|
||||
BPF_F_SKIP_FIELD_MASK = 0xff
|
||||
|
@ -389,6 +390,7 @@ const (
|
|||
CLONE_NEWNET = 0x40000000
|
||||
CLONE_NEWNS = 0x20000
|
||||
CLONE_NEWPID = 0x20000000
|
||||
CLONE_NEWTIME = 0x80
|
||||
CLONE_NEWUSER = 0x10000000
|
||||
CLONE_NEWUTS = 0x4000000
|
||||
CLONE_PARENT = 0x8000
|
||||
|
@ -671,6 +673,7 @@ const (
|
|||
FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617
|
||||
FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a
|
||||
FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616
|
||||
FS_IOC_MEASURE_VERITY = 0xc0046686
|
||||
FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618
|
||||
FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619
|
||||
FS_KEY_DESCRIPTOR_SIZE = 0x8
|
||||
|
@ -683,6 +686,9 @@ const (
|
|||
FS_POLICY_FLAGS_PAD_8 = 0x1
|
||||
FS_POLICY_FLAGS_PAD_MASK = 0x3
|
||||
FS_POLICY_FLAGS_VALID = 0xf
|
||||
FS_VERITY_FL = 0x100000
|
||||
FS_VERITY_HASH_ALG_SHA256 = 0x1
|
||||
FS_VERITY_HASH_ALG_SHA512 = 0x2
|
||||
FUTEXFS_SUPER_MAGIC = 0xbad1dea
|
||||
F_ADD_SEALS = 0x409
|
||||
F_DUPFD = 0x0
|
||||
|
@ -733,6 +739,7 @@ const (
|
|||
GENL_NAMSIZ = 0x10
|
||||
GENL_START_ALLOC = 0x13
|
||||
GENL_UNS_ADMIN_PERM = 0x10
|
||||
GRND_INSECURE = 0x4
|
||||
GRND_NONBLOCK = 0x1
|
||||
GRND_RANDOM = 0x2
|
||||
HDIO_DRIVE_CMD = 0x31f
|
||||
|
@ -1483,6 +1490,7 @@ const (
|
|||
PR_GET_FPEMU = 0x9
|
||||
PR_GET_FPEXC = 0xb
|
||||
PR_GET_FP_MODE = 0x2e
|
||||
PR_GET_IO_FLUSHER = 0x3a
|
||||
PR_GET_KEEPCAPS = 0x7
|
||||
PR_GET_NAME = 0x10
|
||||
PR_GET_NO_NEW_PRIVS = 0x27
|
||||
|
@ -1518,6 +1526,7 @@ const (
|
|||
PR_SET_FPEMU = 0xa
|
||||
PR_SET_FPEXC = 0xc
|
||||
PR_SET_FP_MODE = 0x2d
|
||||
PR_SET_IO_FLUSHER = 0x39
|
||||
PR_SET_KEEPCAPS = 0x8
|
||||
PR_SET_MM = 0x23
|
||||
PR_SET_MM_ARG_END = 0x9
|
||||
|
@ -1746,12 +1755,15 @@ const (
|
|||
RTM_DELRULE = 0x21
|
||||
RTM_DELTCLASS = 0x29
|
||||
RTM_DELTFILTER = 0x2d
|
||||
RTM_DELVLAN = 0x71
|
||||
RTM_F_CLONED = 0x200
|
||||
RTM_F_EQUALIZE = 0x400
|
||||
RTM_F_FIB_MATCH = 0x2000
|
||||
RTM_F_LOOKUP_TABLE = 0x1000
|
||||
RTM_F_NOTIFY = 0x100
|
||||
RTM_F_OFFLOAD = 0x4000
|
||||
RTM_F_PREFIX = 0x800
|
||||
RTM_F_TRAP = 0x8000
|
||||
RTM_GETACTION = 0x32
|
||||
RTM_GETADDR = 0x16
|
||||
RTM_GETADDRLABEL = 0x4a
|
||||
|
@ -1773,7 +1785,8 @@ const (
|
|||
RTM_GETSTATS = 0x5e
|
||||
RTM_GETTCLASS = 0x2a
|
||||
RTM_GETTFILTER = 0x2e
|
||||
RTM_MAX = 0x6f
|
||||
RTM_GETVLAN = 0x72
|
||||
RTM_MAX = 0x73
|
||||
RTM_NEWACTION = 0x30
|
||||
RTM_NEWADDR = 0x14
|
||||
RTM_NEWADDRLABEL = 0x48
|
||||
|
@ -1788,6 +1801,7 @@ const (
|
|||
RTM_NEWNETCONF = 0x50
|
||||
RTM_NEWNEXTHOP = 0x68
|
||||
RTM_NEWNSID = 0x58
|
||||
RTM_NEWNVLAN = 0x70
|
||||
RTM_NEWPREFIX = 0x34
|
||||
RTM_NEWQDISC = 0x24
|
||||
RTM_NEWROUTE = 0x18
|
||||
|
@ -1795,8 +1809,8 @@ const (
|
|||
RTM_NEWSTATS = 0x5c
|
||||
RTM_NEWTCLASS = 0x28
|
||||
RTM_NEWTFILTER = 0x2c
|
||||
RTM_NR_FAMILIES = 0x18
|
||||
RTM_NR_MSGTYPES = 0x60
|
||||
RTM_NR_FAMILIES = 0x19
|
||||
RTM_NR_MSGTYPES = 0x64
|
||||
RTM_SETDCB = 0x4f
|
||||
RTM_SETLINK = 0x13
|
||||
RTM_SETNEIGHTBL = 0x43
|
||||
|
@ -2086,7 +2100,7 @@ const (
|
|||
TASKSTATS_GENL_NAME = "TASKSTATS"
|
||||
TASKSTATS_GENL_VERSION = 0x1
|
||||
TASKSTATS_TYPE_MAX = 0x6
|
||||
TASKSTATS_VERSION = 0x9
|
||||
TASKSTATS_VERSION = 0xa
|
||||
TCIFLUSH = 0x0
|
||||
TCIOFF = 0x2
|
||||
TCIOFLUSH = 0x2
|
||||
|
@ -2267,7 +2281,7 @@ const (
|
|||
VMADDR_CID_ANY = 0xffffffff
|
||||
VMADDR_CID_HOST = 0x2
|
||||
VMADDR_CID_HYPERVISOR = 0x0
|
||||
VMADDR_CID_RESERVED = 0x1
|
||||
VMADDR_CID_LOCAL = 0x1
|
||||
VMADDR_PORT_ANY = 0xffffffff
|
||||
VM_SOCKETS_INVALID_VERSION = 0xffffffff
|
||||
VQUIT = 0x1
|
||||
|
@ -2394,6 +2408,7 @@ const (
|
|||
XENFS_SUPER_MAGIC = 0xabba1974
|
||||
XFS_SUPER_MAGIC = 0x58465342
|
||||
Z3FOLD_MAGIC = 0x33
|
||||
ZONEFS_MAGIC = 0x5a4f4653
|
||||
ZSMALLOC_MAGIC = 0x58295829
|
||||
)
|
||||
|
||||
|
|
|
@ -73,6 +73,8 @@ const (
|
|||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FP_XSTATE_MAGIC2 = 0x46505845
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80046601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
|
@ -73,6 +73,8 @@ const (
|
|||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FP_XSTATE_MAGIC2 = 0x46505845
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80046601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
|
@ -75,6 +75,8 @@ const (
|
|||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FPSIMD_MAGIC = 0x46508001
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40046601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40046601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x4000
|
||||
FFDLY = 0x4000
|
||||
FLUSHO = 0x800000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x4000
|
||||
FFDLY = 0x4000
|
||||
FLUSHO = 0x800000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
|
@ -72,6 +72,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
|
@ -76,6 +76,8 @@ const (
|
|||
FF1 = 0x8000
|
||||
FFDLY = 0x8000
|
||||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
// go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
// +build illumos,amd64
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//go:cgo_import_dynamic libc_readv readv "libc.so"
|
||||
//go:cgo_import_dynamic libc_preadv preadv "libc.so"
|
||||
//go:cgo_import_dynamic libc_writev writev "libc.so"
|
||||
//go:cgo_import_dynamic libc_pwritev pwritev "libc.so"
|
||||
|
||||
//go:linkname procreadv libc_readv
|
||||
//go:linkname procpreadv libc_preadv
|
||||
//go:linkname procwritev libc_writev
|
||||
//go:linkname procpwritev libc_pwritev
|
||||
|
||||
var (
|
||||
procreadv,
|
||||
procpreadv,
|
||||
procwritev,
|
||||
procpwritev syscallFunc
|
||||
)
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func readv(fd int, iovs []Iovec) (n int, err error) {
|
||||
var _p0 *Iovec
|
||||
if len(iovs) > 0 {
|
||||
_p0 = &iovs[0]
|
||||
}
|
||||
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0)
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func preadv(fd int, iovs []Iovec, off int64) (n int, err error) {
|
||||
var _p0 *Iovec
|
||||
if len(iovs) > 0 {
|
||||
_p0 = &iovs[0]
|
||||
}
|
||||
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0)
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func writev(fd int, iovs []Iovec) (n int, err error) {
|
||||
var _p0 *Iovec
|
||||
if len(iovs) > 0 {
|
||||
_p0 = &iovs[0]
|
||||
}
|
||||
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0)
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) {
|
||||
var _p0 *Iovec
|
||||
if len(iovs) > 0 {
|
||||
_p0 = &iovs[0]
|
||||
}
|
||||
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0)
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
|
@ -83,6 +83,22 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
|
|||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
r0, _, e1 := Syscall6(SYS_OPENAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size), 0, 0)
|
||||
fd = int(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
|
||||
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
|
||||
n = int(r0)
|
||||
|
|
|
@@ -431,4 +431,6 @@ const (
	SYS_FSPICK = 433
	SYS_PIDFD_OPEN = 434
	SYS_CLONE3 = 435
	SYS_OPENAT2 = 437
	SYS_PIDFD_GETFD = 438
)
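The new numbers in these tables (SYS_OPENAT2 and SYS_PIDFD_GETFD) can also be invoked directly through unix.Syscall where no typed wrapper exists yet. The following is a minimal, hypothetical sketch, not part of this change and assuming a Linux 5.6+ kernel; it duplicates the process's own stdout via pidfd_getfd:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// pidfd_open(getpid(), 0): a pidfd referring to this process (Linux 5.3+).
	pidfd, _, errno := unix.Syscall(unix.SYS_PIDFD_OPEN, uintptr(os.Getpid()), 0, 0)
	if errno != 0 {
		fmt.Fprintln(os.Stderr, "pidfd_open:", errno)
		os.Exit(1)
	}
	defer unix.Close(int(pidfd))

	// pidfd_getfd(pidfd, stdout, 0): duplicate stdout through the pidfd (Linux 5.6+).
	fd, _, errno := unix.Syscall(unix.SYS_PIDFD_GETFD, pidfd, os.Stdout.Fd(), 0)
	if errno != 0 {
		fmt.Fprintln(os.Stderr, "pidfd_getfd:", errno)
		os.Exit(1)
	}
	defer unix.Close(int(fd))
	unix.Write(int(fd), []byte("written via the duplicated descriptor\n"))
}
```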
|
||||
|
|
|
@ -353,4 +353,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -395,4 +395,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -298,4 +298,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -416,4 +416,6 @@ const (
|
|||
SYS_FSPICK = 4433
|
||||
SYS_PIDFD_OPEN = 4434
|
||||
SYS_CLONE3 = 4435
|
||||
SYS_OPENAT2 = 4437
|
||||
SYS_PIDFD_GETFD = 4438
|
||||
)
|
||||
|
|
|
@ -346,4 +346,6 @@ const (
|
|||
SYS_FSPICK = 5433
|
||||
SYS_PIDFD_OPEN = 5434
|
||||
SYS_CLONE3 = 5435
|
||||
SYS_OPENAT2 = 5437
|
||||
SYS_PIDFD_GETFD = 5438
|
||||
)
|
||||
|
|
|
@ -346,4 +346,6 @@ const (
|
|||
SYS_FSPICK = 5433
|
||||
SYS_PIDFD_OPEN = 5434
|
||||
SYS_CLONE3 = 5435
|
||||
SYS_OPENAT2 = 5437
|
||||
SYS_PIDFD_GETFD = 5438
|
||||
)
|
||||
|
|
|
@ -416,4 +416,6 @@ const (
|
|||
SYS_FSPICK = 4433
|
||||
SYS_PIDFD_OPEN = 4434
|
||||
SYS_CLONE3 = 4435
|
||||
SYS_OPENAT2 = 4437
|
||||
SYS_PIDFD_GETFD = 4438
|
||||
)
|
||||
|
|
|
@ -395,4 +395,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -395,4 +395,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -297,4 +297,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -360,4 +360,6 @@ const (
|
|||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_CLONE3 = 435
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -374,4 +374,6 @@ const (
|
|||
SYS_FSMOUNT = 432
|
||||
SYS_FSPICK = 433
|
||||
SYS_PIDFD_OPEN = 434
|
||||
SYS_OPENAT2 = 437
|
||||
SYS_PIDFD_GETFD = 438
|
||||
)
|
||||
|
|
|
@ -114,7 +114,8 @@ type FscryptKeySpecifier struct {
|
|||
type FscryptAddKeyArg struct {
|
||||
Key_spec FscryptKeySpecifier
|
||||
Raw_size uint32
|
||||
_ [9]uint32
|
||||
Key_id uint32
|
||||
_ [8]uint32
|
||||
}
|
||||
|
||||
type FscryptRemoveKeyArg struct {
|
||||
|
@ -479,7 +480,7 @@ const (
|
|||
IFLA_NEW_IFINDEX = 0x31
|
||||
IFLA_MIN_MTU = 0x32
|
||||
IFLA_MAX_MTU = 0x33
|
||||
IFLA_MAX = 0x35
|
||||
IFLA_MAX = 0x36
|
||||
IFLA_INFO_KIND = 0x1
|
||||
IFLA_INFO_DATA = 0x2
|
||||
IFLA_INFO_XSTATS = 0x3
|
||||
|
@ -690,6 +691,22 @@ const (
|
|||
AT_EACCESS = 0x200
|
||||
)
|
||||
|
||||
type OpenHow struct {
|
||||
Flags uint64
|
||||
Mode uint64
|
||||
Resolve uint64
|
||||
}
|
||||
|
||||
const SizeofOpenHow = 0x18
|
||||
|
||||
const (
|
||||
RESOLVE_BENEATH = 0x8
|
||||
RESOLVE_IN_ROOT = 0x10
|
||||
RESOLVE_NO_MAGICLINKS = 0x2
|
||||
RESOLVE_NO_SYMLINKS = 0x4
|
||||
RESOLVE_NO_XDEV = 0x1
|
||||
)
|
||||
|
||||
type PollFd struct {
|
||||
Fd int32
|
||||
Events int16
|
||||
|
@ -2291,3 +2308,49 @@ const (
|
|||
DEVLINK_DPIPE_HEADER_IPV4 = 0x1
|
||||
DEVLINK_DPIPE_HEADER_IPV6 = 0x2
|
||||
)
|
||||
|
||||
type FsverityDigest struct {
|
||||
Algorithm uint16
|
||||
Size uint16
|
||||
}
|
||||
|
||||
type FsverityEnableArg struct {
|
||||
Version uint32
|
||||
Hash_algorithm uint32
|
||||
Block_size uint32
|
||||
Salt_size uint32
|
||||
Salt_ptr uint64
|
||||
Sig_size uint32
|
||||
_ uint32
|
||||
Sig_ptr uint64
|
||||
_ [11]uint64
|
||||
}
|
||||
|
||||
type Nhmsg struct {
|
||||
Family uint8
|
||||
Scope uint8
|
||||
Protocol uint8
|
||||
Resvd uint8
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
type NexthopGrp struct {
|
||||
Id uint32
|
||||
Weight uint8
|
||||
Resvd1 uint8
|
||||
Resvd2 uint16
|
||||
}
|
||||
|
||||
const (
|
||||
NHA_UNSPEC = 0x0
|
||||
NHA_ID = 0x1
|
||||
NHA_GROUP = 0x2
|
||||
NHA_GROUP_TYPE = 0x3
|
||||
NHA_BLACKHOLE = 0x4
|
||||
NHA_OIF = 0x5
|
||||
NHA_GATEWAY = 0x6
|
||||
NHA_ENCAP_TYPE = 0x7
|
||||
NHA_ENCAP = 0x8
|
||||
NHA_GROUPS = 0x9
|
||||
NHA_MASTER = 0xa
|
||||
)
|
||||
|
|
|
@ -287,6 +287,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint32
|
||||
|
|
|
@ -298,6 +298,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -276,6 +276,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint32
|
||||
|
|
|
@ -277,6 +277,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -281,6 +281,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint32
|
||||
|
|
|
@ -280,6 +280,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -280,6 +280,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -281,6 +281,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint32
|
||||
|
|
|
@ -287,6 +287,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -287,6 +287,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -305,6 +305,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -300,6 +300,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@ -282,6 +282,7 @@ type Taskstats struct {
|
|||
Freepages_delay_total uint64
|
||||
Thrashing_count uint64
|
||||
Thrashing_delay_total uint64
|
||||
Ac_btime64 uint64
|
||||
}
|
||||
|
||||
type cpuMask uint64
|
||||
|
|
|
@@ -113,10 +113,6 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	if grpclog.V(2) {
		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
	}
	if len(s.ResolverState.Addresses) == 0 {
		b.ResolverError(errors.New("produced zero addresses"))
		return balancer.ErrBadResolverState
	}
	// Successful resolution; clear resolver error and ensure we return nil.
	b.resolverErr = nil
	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
@@ -144,6 +140,14 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
			// The entry will be deleted in HandleSubConnStateChange.
		}
	}
	// If resolver state contains no addresses, return an error so ClientConn
	// will trigger re-resolve. Also records this as an resolver error, so when
	// the overall state turns transient failure, the error message will have
	// the zero address information.
	if len(s.ResolverState.Addresses) == 0 {
		b.ResolverError(errors.New("produced zero addresses"))
		return balancer.ErrBadResolverState
	}
	return nil
}
@@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.28.0"
const Version = "1.28.1"
@@ -66,23 +66,23 @@ github.com/spf13/viper
github.com/subosito/gotenv
# golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c
golang.org/x/crypto/ripemd160
# golang.org/x/net v0.0.0-20200319234117-63522dbf7eec
# golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
# golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d
# golang.org/x/sys v0.0.0-20200408040146-ea54a3c99b9b
golang.org/x/sys/unix
# golang.org/x/text v0.3.2
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# google.golang.org/genproto v0.0.0-20200319113533-08878b785e9c
# google.golang.org/genproto v0.0.0-20200408120641-fbb3ad325eb7
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.28.0
# google.golang.org/grpc v1.28.1
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff