add GetLightdInfo RPC

Co-authored-by: Aditya Kulkarni <adityapk@gmail.com>

Also picks up changes to walletrpc/service.proto. To regenerate
compact_formats.pb.go and service.pb.go:

    cd walletrpc
    protoc compact_formats.proto --go_out=plugins=grpc:.
    protoc service.proto --go_out=plugins=grpc:.

Ran go mod tidy && go mod vendor afterwards.

parent e0e3eda279
commit f58a5f9376
@@ -193,7 +193,7 @@ func main() {
 	}

 	// Compact transaction service initialization
-	service, err := frontend.NewSQLiteStreamer(opts.dbPath, rpcClient)
+	service, err := frontend.NewSQLiteStreamer(opts.dbPath, rpcClient, log)
 	if err != nil {
 		log.WithFields(logrus.Fields{
 			"db_path": opts.dbPath,
@@ -0,0 +1,46 @@
+package common
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+
+	"github.com/btcsuite/btcd/rpcclient"
+	"github.com/pkg/errors"
+)
+
+func GetSaplingInfo(rpcClient *rpcclient.Client) (int, int, string, string, error) {
+	result, rpcErr := rpcClient.RawRequest("getblockchaininfo", make([]json.RawMessage, 0))
+
+	var err error
+	var errCode int64
+
+	// For some reason, the error responses are not JSON
+	if rpcErr != nil {
+		errParts := strings.SplitN(rpcErr.Error(), ":", 2)
+		errCode, err = strconv.ParseInt(errParts[0], 10, 32)
+		if err == nil && errCode == -8 {
+			return -1, -1, "", "", nil
+		}
+		return -1, -1, "", "", errors.Wrap(rpcErr, "error requesting block")
+	}
+
+	var f interface{}
+	err = json.Unmarshal(result, &f)
+	if err != nil {
+		return -1, -1, "", "", errors.Wrap(err, "error reading JSON response")
+	}
+
+	chainName := f.(map[string]interface{})["chain"].(string)
+
+	upgradeJSON := f.(map[string]interface{})["upgrades"]
+	saplingJSON := upgradeJSON.(map[string]interface{})["76b809bb"] // Sapling ID
+	saplingHeight := saplingJSON.(map[string]interface{})["activationheight"].(float64)
+
+	blockHeight := f.(map[string]interface{})["headers"].(float64)
+
+	consensus := f.(map[string]interface{})["consensus"]
+	branchID := consensus.(map[string]interface{})["nextblock"].(string)
+
+	return int(saplingHeight), int(blockHeight), chainName, branchID, nil
+}
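For reference, a minimal sketch (not part of this commit) of how a caller could drive GetSaplingInfo against a zcashd node; the host, port, and credentials below are placeholders, and zcashd is assumed to be reachable in plain HTTP POST mode.

    package main

    import (
        "fmt"
        "log"

        "github.com/btcsuite/btcd/rpcclient"
        "github.com/zcash-hackworks/lightwalletd/common"
    )

    func main() {
        // Placeholder connection settings; zcashd's JSON-RPC interface only
        // supports HTTP POST mode and has no TLS by default.
        client, err := rpcclient.New(&rpcclient.ConnConfig{
            Host:         "localhost:8232",
            User:         "rpcuser",
            Pass:         "rpcpassword",
            HTTPPostMode: true,
            DisableTLS:   true,
        }, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Shutdown()

        saplingHeight, blockHeight, chainName, branchID, err := common.GetSaplingInfo(client)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("chain=%s sapling=%d tip=%d branch=%s\n",
            chainName, saplingHeight, blockHeight, branchID)
    }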
@@ -17,6 +17,8 @@ import (
 	// blank import for sqlite driver support
 	_ "github.com/mattn/go-sqlite3"

+	"github.com/sirupsen/logrus"
+	"github.com/zcash-hackworks/lightwalletd/common"
 	"github.com/zcash-hackworks/lightwalletd/storage"
 	"github.com/zcash-hackworks/lightwalletd/walletrpc"
 )

@@ -29,9 +31,10 @@ var (
 type SqlStreamer struct {
 	db     *sql.DB
 	client *rpcclient.Client
+	log    *logrus.Entry
 }

-func NewSQLiteStreamer(dbPath string, client *rpcclient.Client) (walletrpc.CompactTxStreamerServer, error) {
+func NewSQLiteStreamer(dbPath string, client *rpcclient.Client, log *logrus.Entry) (walletrpc.CompactTxStreamerServer, error) {
 	db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?_busy_timeout=10000&cache=shared", dbPath))
 	db.SetMaxOpenConns(1)
 	if err != nil {

@@ -44,7 +47,7 @@ func NewSQLiteStreamer(dbPath string, client *rpcclient.Client) (walletrpc.Compa
 		return nil, err
 	}

-	return &SqlStreamer{db, client}, nil
+	return &SqlStreamer{db, client, log}, nil
 }

 func (s *SqlStreamer) GracefulStop() error {

@@ -153,6 +156,30 @@ func (s *SqlStreamer) GetTransaction(ctx context.Context, txf *walletrpc.TxFilte
 	return &walletrpc.RawTransaction{Data: txBytes}, nil
 }

+// GetLightdInfo gets the LightWalletD (this server) info
+func (s *SqlStreamer) GetLightdInfo(ctx context.Context, in *walletrpc.Empty) (*walletrpc.LightdInfo, error) {
+	saplingHeight, blockHeight, chainName, consensusBranchId, err := common.GetSaplingInfo(s.client)
+
+	if err != nil {
+		s.log.WithFields(logrus.Fields{
+			"error": err,
+		}).Warn("Unable to get sapling activation height")
+		return nil, err
+	}
+
+	// TODO these are called Error but they aren't at the moment.
+	// A success will return code 0 and message txhash.
+	return &walletrpc.LightdInfo{
+		Version:                 "0.2.0",
+		Vendor:                  "ECC LightWalletD",
+		TaddrSupport:            true,
+		ChainName:               chainName,
+		SaplingActivationHeight: uint64(saplingHeight),
+		ConsensusBranchId:       consensusBranchId,
+		BlockHeight:             uint64(blockHeight),
+	}, nil
+}
+
 // SendTransaction forwards raw transaction bytes to a zcashd instance over JSON-RPC
 func (s *SqlStreamer) SendTransaction(ctx context.Context, rawtx *walletrpc.RawTransaction) (*walletrpc.SendResponse, error) {
 	// sendrawtransaction "hexstring" ( allowhighfees )
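A minimal sketch (not part of this commit) of how a wallet might exercise the new RPC through the generated gRPC client; the address is a placeholder and the connection is left insecure for brevity.

    package main

    import (
        "context"
        "fmt"
        "log"

        "google.golang.org/grpc"

        "github.com/zcash-hackworks/lightwalletd/walletrpc"
    )

    func main() {
        // Placeholder lightwalletd address; real deployments should use TLS.
        conn, err := grpc.Dial("localhost:9067", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := walletrpc.NewCompactTxStreamerClient(conn)
        info, err := client.GetLightdInfo(context.Background(), &walletrpc.Empty{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s %s chain=%s sapling=%d tip=%d branch=%s\n",
            info.Vendor, info.Version, info.ChainName,
            info.SaplingActivationHeight, info.BlockHeight, info.ConsensusBranchId)
    }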
 go.mod | 28 lines changed

@@ -3,16 +3,20 @@ module github.com/zcash-hackworks/lightwalletd
 go 1.12

 require (
-	github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d
-	github.com/golang/protobuf v1.2.0
-	github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
-	github.com/jtolds/gls v4.2.1+incompatible // indirect
-	github.com/mattn/go-sqlite3 v1.10.0
-	github.com/pkg/errors v0.8.0
-	github.com/sirupsen/logrus v1.2.0
-	github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect
-	github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect
-	golang.org/x/net v0.0.0-20181220203305-927f97764cc3
-	google.golang.org/grpc v1.17.0
-	gopkg.in/ini.v1 v1.41.0
+	github.com/btcsuite/btcd v0.20.1-beta
+	github.com/golang/protobuf v1.3.2
+	github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de // indirect
+	github.com/mattn/go-sqlite3 v1.13.0
+	github.com/pkg/errors v0.8.1
+	github.com/sirupsen/logrus v1.4.2
+	github.com/smartystreets/goconvey v1.6.4 // indirect
+	golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c // indirect
+	golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 // indirect
+	golang.org/x/net v0.0.0-20191125084936-ffdde1057850 // indirect
+	golang.org/x/sys v0.0.0-20191126131656-8a8471f7e56d // indirect
+	golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 // indirect
+	google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11 // indirect
+	google.golang.org/grpc v1.25.1
+	gopkg.in/ini.v1 v1.51.0
+	honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc // indirect
 )
 go.sum | 94 lines changed
|
@ -1,11 +1,12 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw=
|
||||
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE=
|
||||
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
|
@ -13,10 +14,13 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
|||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
|
@ -24,64 +28,92 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
|
|||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.13.0 h1:LnJI81JidiW9r7pS/hXe6cFeO5EXNq7KbfvoJLRI69c=
|
||||
github.com/mattn/go-sqlite3 v1.13.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4=
|
||||
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20191125084936-ffdde1057850 h1:Vq85/r8R9IdcUHmZ0/nQlUg1v15rzvQ2sHdnZAj/x7s=
|
||||
golang.org/x/net v0.0.0-20191125084936-ffdde1057850/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191126131656-8a8471f7e56d h1:kCXqdOO2GMlu0vCsEMBXwj/b0E9wyFpNPBpuv/go/F8=
|
||||
golang.org/x/sys v0.0.0-20191126131656-8a8471f7e56d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11 h1:51D++eCgOHufw5VfDE9Uzqyyc+OyQIjb9hkYy9LN5Fk=
|
||||
google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE=
|
||||
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
|
@@ -36,10 +36,17 @@ var (
 // interface from crypto/elliptic.
 type KoblitzCurve struct {
 	*elliptic.CurveParams
-	q *big.Int
+
+	// q is the value (P+1)/4 used to compute the square root of field
+	// elements.
+	q *big.Int
+
 	H         int      // cofactor of the curve.
 	halfOrder *big.Int // half the order N
+
+	// fieldB is the constant B of the curve as a fieldVal.
+	fieldB *fieldVal

 	// byteSize is simply the bit size / 8 and is provided for convenience
 	// since it is calculated repeatedly.
 	byteSize int

@@ -879,12 +886,22 @@ func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
 	return curve.fieldJacobianToBigAffine(qx, qy, qz)
 }

-// QPlus1Div4 returns the Q+1/4 constant for the curve for use in calculating
-// square roots via exponention.
+// QPlus1Div4 returns the (P+1)/4 constant for the curve for use in calculating
+// square roots via exponentiation.
+//
+// DEPRECATED: The actual value returned is (P+1)/4, whereas the original
+// method name implies that this value is (((P+1)/4)+1)/4. This method is kept
+// to maintain backwards compatibility of the API. Use Q() instead.
 func (curve *KoblitzCurve) QPlus1Div4() *big.Int {
 	return curve.q
 }

+// Q returns the (P+1)/4 constant for the curve for use in calculating square
+// roots via exponentiation.
+func (curve *KoblitzCurve) Q() *big.Int {
+	return curve.q
+}
+
 var initonce sync.Once
 var secp256k1 KoblitzCurve

@@ -917,6 +934,7 @@ func initS256() {
 		big.NewInt(1)), big.NewInt(4))
 	secp256k1.H = 1
 	secp256k1.halfOrder = new(big.Int).Rsh(secp256k1.N, 1)
+	secp256k1.fieldB = new(fieldVal).SetByteSlice(secp256k1.B.Bytes())

 	// Provided for convenience since this gets computed repeatedly.
 	secp256k1.byteSize = secp256k1.BitSize / 8
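The q field holds (P+1)/4 because, for a prime P ≡ 3 (mod 4) such as the secp256k1 field prime, a quadratic residue a satisfies sqrt(a) = a^((P+1)/4) mod P. A tiny sketch (not from the commit) illustrating the identity with math/big on a toy prime:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // Toy prime with p % 4 == 3, chosen only to keep the numbers small.
        p := big.NewInt(23)
        a := big.NewInt(13) // 13 == 6*6 mod 23, so it is a quadratic residue

        // q = (p+1)/4, the same exponent the curve precomputes for P.
        q := new(big.Int).Div(new(big.Int).Add(p, big.NewInt(1)), big.NewInt(4))

        root := new(big.Int).Exp(a, q, p) // a^((p+1)/4) mod p
        check := new(big.Int).Mod(new(big.Int).Mul(root, root), p)

        fmt.Println(root, check) // prints 6 13: root^2 mod p recovers a
    }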
@@ -102,6 +102,20 @@ const (
 	fieldPrimeWordOne = 0x3ffffbf
 )

+var (
+	// fieldQBytes is the value Q = (P+1)/4 for the secp256k1 prime P. This
+	// value is used to efficiently compute the square root of values in the
+	// field via exponentiation. The value of Q in hex is:
+	//
+	//   Q = 3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c
+	fieldQBytes = []byte{
+		0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x0c,
+	}
+)
+
 // fieldVal implements optimized fixed-precision arithmetic over the
 // secp256k1 finite field. This means all arithmetic is performed modulo
 // 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f. It
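As a quick sanity check (not part of the commit), the byte string above is just the big-endian encoding of (P+1)/4 for the secp256k1 prime, which can be reproduced with math/big:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // secp256k1 field prime P = 2^256 - 2^32 - 977.
        p, _ := new(big.Int).SetString(
            "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)

        // Q = (P+1)/4; valid as a square-root exponent because P % 4 == 3.
        q := new(big.Int).Div(new(big.Int).Add(p, big.NewInt(1)), big.NewInt(4))

        // Matches the hex value quoted in the fieldQBytes comment.
        fmt.Printf("%x\n", q.Bytes())
    }

The 29 0xff bytes, the leading 0x3f, the single 0xbf, and the trailing 0x0c are exactly the four distinct byte values the unrolled loop below special-cases.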
@ -1221,3 +1235,118 @@ func (f *fieldVal) Inverse() *fieldVal {
|
|||
f.Square().Square().Square().Square().Square() // f = a^(2^256 - 4294968320)
|
||||
return f.Mul(&a45) // f = a^(2^256 - 4294968275) = a^(p-2)
|
||||
}
|
||||
|
||||
// SqrtVal computes the square root of x modulo the curve's prime, and stores
|
||||
// the result in f. The square root is computed via exponentiation of x by the
|
||||
// value Q = (P+1)/4 using the curve's precomputed big-endian representation of
|
||||
// the Q. This method uses a modified version of square-and-multiply
|
||||
// exponentiation over secp256k1 fieldVals to operate on bytes instead of bits,
|
||||
// which offers better performance over both big.Int exponentiation and bit-wise
|
||||
// square-and-multiply.
|
||||
//
|
||||
// NOTE: This method only works when P is intended to be the secp256k1 prime and
|
||||
// is not constant time. The returned value is of magnitude 1, but is
|
||||
// denormalized.
|
||||
func (f *fieldVal) SqrtVal(x *fieldVal) *fieldVal {
|
||||
// The following computation iteratively computes x^((P+1)/4) = x^Q
|
||||
// using the recursive, piece-wise definition:
|
||||
//
|
||||
// x^n = (x^2)^(n/2) mod P if n is even
|
||||
// x^n = x(x^2)^((n-1)/2) mod P if n is odd
|
||||
//
|
||||
// Given n in its big-endian representation b_k, ..., b_0, x^n can be
|
||||
// computed by defining the sequence r_k+1, ..., r_0, where:
|
||||
//
|
||||
// r_k+1 = 1
|
||||
// r_i = (r_i+1)^2 * x^b_i for i = k, ..., 0
|
||||
//
|
||||
// The final value r_0 = x^n.
|
||||
//
|
||||
// See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more
|
||||
// details.
|
||||
//
|
||||
// This can be further optimized, by observing that the value of Q in
|
||||
// secp256k1 has the value:
|
||||
//
|
||||
// Q = 3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c
|
||||
//
|
||||
// We can unroll the typical bit-wise interpretation of the
|
||||
// exponentiation algorithm above to instead operate on bytes.
|
||||
// This reduces the number of comparisons by an order of magnitude,
|
||||
// reducing the overhead of failed branch predictions and additional
|
||||
// comparisons in this method.
|
||||
//
|
||||
// Since there are only 4 unique bytes of Q, this keeps the jump
|
||||
// table small without the need to handle all possible 8-bit values.
|
||||
// Further, we observe that 29 of the 32 bytes are 0xff; making the
|
||||
// first case handle 0xff therefore optimizes the hot path.
|
||||
f.SetInt(1)
|
||||
for _, b := range fieldQBytes {
|
||||
switch b {
|
||||
|
||||
// Most common case, where all 8 bits are set.
|
||||
case 0xff:
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
|
||||
// First byte of Q (0x3f), where all but the top two bits are
|
||||
// set. Note that this case only applies six operations, since
|
||||
// the highest bit of Q resides in bit six of the first byte. We
|
||||
// ignore the first two bits, since squaring for these bits will
|
||||
// result in an invalid result. We forgo squaring f before the
|
||||
// first multiply, since 1^2 = 1.
|
||||
case 0x3f:
|
||||
f.Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
|
||||
// Byte 28 of Q (0xbf), where only bit 7 is unset.
|
||||
case 0xbf:
|
||||
f.Square().Mul(x)
|
||||
f.Square()
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
|
||||
// Byte 31 of Q (0x0c), where only bits 3 and 4 are set.
|
||||
default:
|
||||
f.Square()
|
||||
f.Square()
|
||||
f.Square()
|
||||
f.Square()
|
||||
f.Square().Mul(x)
|
||||
f.Square().Mul(x)
|
||||
f.Square()
|
||||
f.Square()
|
||||
}
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// Sqrt computes the square root of f modulo the curve's prime, and stores the
|
||||
// result in f. The square root is computed via exponentiation of x by the value
|
||||
// Q = (P+1)/4 using the curve's precomputed big-endian representation of the Q.
|
||||
// This method uses a modified version of square-and-multiply exponentiation
|
||||
// over secp256k1 fieldVals to operate on bytes instead of bits, which offers
|
||||
// better performance over both big.Int exponentiation and bit-wise
|
||||
// square-and-multiply.
|
||||
//
|
||||
// NOTE: This method only works when P is intended to be the secp256k1 prime and
|
||||
// is not constant time. The returned value is of magnitude 1, but is
|
||||
// denormalized.
|
||||
func (f *fieldVal) Sqrt() *fieldVal {
|
||||
return f.SqrtVal(f)
|
||||
}
|
||||
|
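A sketch of an in-package test (not part of the commit) that cross-checks the byte-wise exponentiation against a plain math/big computation of x^Q mod P; the test name and loop count are arbitrary.

    package btcec

    import (
        "crypto/rand"
        "math/big"
        "testing"
    )

    func TestSqrtValMatchesBigInt(t *testing.T) {
        curve := S256()
        q := new(big.Int).Div(new(big.Int).Add(curve.P, big.NewInt(1)), big.NewInt(4))

        for i := 0; i < 16; i++ {
            xBig, err := rand.Int(rand.Reader, curve.P)
            if err != nil {
                t.Fatal(err)
            }

            // Reference result via big.Int exponentiation.
            want := new(big.Int).Exp(xBig, q, curve.P)

            // Result via the unrolled fieldVal path added above.
            var x, root fieldVal
            x.SetByteSlice(xBig.Bytes())
            root.SqrtVal(&x).Normalize()
            got := new(big.Int).SetBytes(root.Bytes()[:])

            if got.Cmp(want) != 0 {
                t.Fatalf("mismatch for x=%v: got %v, want %v", xBig, got, want)
            }
        }
    }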
|
|
@@ -22,41 +22,40 @@ func isOdd(a *big.Int) bool {
 	return a.Bit(0) == 1
 }

-// decompressPoint decompresses a point on the given curve given the X point and
+// decompressPoint decompresses a point on the secp256k1 curve given the X point and
 // the solution to use.
-func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) {
-	// TODO: This will probably only work for secp256k1 due to
-	// optimizations.
+func decompressPoint(curve *KoblitzCurve, bigX *big.Int, ybit bool) (*big.Int, error) {
+	var x fieldVal
+	x.SetByteSlice(bigX.Bytes())

-	// Y = +-sqrt(x^3 + B)
-	x3 := new(big.Int).Mul(x, x)
-	x3.Mul(x3, x)
-	x3.Add(x3, curve.Params().B)
-	x3.Mod(x3, curve.Params().P)
+	// Compute x^3 + B mod p.
+	var x3 fieldVal
+	x3.SquareVal(&x).Mul(&x)
+	x3.Add(curve.fieldB).Normalize()

 	// Now calculate sqrt mod p of x^3 + B
 	// This code used to do a full sqrt based on tonelli/shanks,
 	// but this was replaced by the algorithms referenced in
 	// https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
-	y := new(big.Int).Exp(x3, curve.QPlus1Div4(), curve.Params().P)
-
-	if ybit != isOdd(y) {
-		y.Sub(curve.Params().P, y)
+	var y fieldVal
+	y.SqrtVal(&x3).Normalize()
+	if ybit != y.IsOdd() {
+		y.Negate(1).Normalize()
 	}

 	// Check that y is a square root of x^3 + B.
-	y2 := new(big.Int).Mul(y, y)
-	y2.Mod(y2, curve.Params().P)
-	if y2.Cmp(x3) != 0 {
+	var y2 fieldVal
+	y2.SquareVal(&y).Normalize()
+	if !y2.Equals(&x3) {
 		return nil, fmt.Errorf("invalid square root")
 	}

 	// Verify that y-coord has expected parity.
-	if ybit != isOdd(y) {
+	if ybit != y.IsOdd() {
 		return nil, fmt.Errorf("ybit doesn't match oddness")
 	}

-	return y, nil
+	return new(big.Int).SetBytes(y.Bytes()[:]), nil
 }

 const (
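decompressPoint itself is unexported; callers reach it through ParsePubKey (shown in the next hunk), as in this small sketch (not part of the commit) that decompresses the 33-byte encoding of the secp256k1 generator point:

    package main

    import (
        "encoding/hex"
        "fmt"
        "log"

        "github.com/btcsuite/btcd/btcec"
    )

    func main() {
        // Compressed generator point: 0x02 prefix (even Y) followed by X.
        compressed, _ := hex.DecodeString(
            "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")

        pub, err := btcec.ParsePubKey(compressed, btcec.S256())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("X = %x\nY = %x\n", pub.X.Bytes(), pub.Y.Bytes())
    }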
@ -102,6 +101,17 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err
|
|||
if format == pubkeyHybrid && ybit != isOdd(pubkey.Y) {
|
||||
return nil, fmt.Errorf("ybit doesn't match oddness")
|
||||
}
|
||||
|
||||
if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 {
|
||||
return nil, fmt.Errorf("pubkey X parameter is >= to P")
|
||||
}
|
||||
if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 {
|
||||
return nil, fmt.Errorf("pubkey Y parameter is >= to P")
|
||||
}
|
||||
if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
|
||||
return nil, fmt.Errorf("pubkey isn't on secp256k1 curve")
|
||||
}
|
||||
|
||||
case PubKeyBytesLenCompressed:
|
||||
// format is 0x2 | solution, <X coordinate>
|
||||
// solution determines which solution of the curve we use.
|
||||
|
@ -115,20 +125,12 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
default: // wrong!
|
||||
return nil, fmt.Errorf("invalid pub key length %d",
|
||||
len(pubKeyStr))
|
||||
}
|
||||
|
||||
if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 {
|
||||
return nil, fmt.Errorf("pubkey X parameter is >= to P")
|
||||
}
|
||||
if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 {
|
||||
return nil, fmt.Errorf("pubkey Y parameter is >= to P")
|
||||
}
|
||||
if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
|
||||
return nil, fmt.Errorf("pubkey isn't on secp256k1 curve")
|
||||
}
|
||||
return &pubkey, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -85,10 +85,10 @@ func (sig *Signature) IsEqual(otherSig *Signature) bool {
|
|||
sig.S.Cmp(otherSig.S) == 0
|
||||
}
|
||||
|
||||
// minSigLen is the minimum length of a DER encoded signature and is
|
||||
// when both R and S are 1 byte each.
|
||||
// MinSigLen is the minimum length of a DER encoded signature and is when both R
|
||||
// and S are 1 byte each.
|
||||
// 0x30 + <1-byte> + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
|
||||
const minSigLen = 8
|
||||
const MinSigLen = 8
|
||||
|
||||
func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error) {
|
||||
// Originally this code used encoding/asn1 in order to parse the
|
||||
|
@ -103,7 +103,7 @@ func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error)
|
|||
|
||||
signature := &Signature{}
|
||||
|
||||
if len(sigStr) < minSigLen {
|
||||
if len(sigStr) < MinSigLen {
|
||||
return nil, errors.New("malformed signature: too short")
|
||||
}
|
||||
// 0x30
|
||||
|
@ -118,7 +118,7 @@ func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error)
|
|||
|
||||
// siglen should be less than the entire message and greater than
|
||||
// the minimal message size.
|
||||
if int(siglen+2) > len(sigStr) || int(siglen+2) < minSigLen {
|
||||
if int(siglen+2) > len(sigStr) || int(siglen+2) < MinSigLen {
|
||||
return nil, errors.New("malformed signature: bad length")
|
||||
}
|
||||
// trim the slice we're working on so we only look at what matters.
|
||||
|
@ -276,7 +276,7 @@ func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
|
|||
}
|
||||
|
||||
// recoverKeyFromSignature recovers a public key from the signature "sig" on the
|
||||
// given message hash "msg". Based on the algorithm found in section 5.1.5 of
|
||||
// given message hash "msg". Based on the algorithm found in section 4.1.6 of
|
||||
// SEC 1 Ver 2.0, page 47-48 (53 and 54 in the pdf). This performs the details
|
||||
// in the inner loop in Step 1. The counter provided is actually the j parameter
|
||||
// of the loop * 2 - on the first iteration of j we do the R case, else the -R
|
||||
|
|
|
@ -638,6 +638,7 @@ func NewSearchRawTransactionsCmd(address string, verbose, skip, count *int, vinE
|
|||
type SendRawTransactionCmd struct {
|
||||
HexTx string
|
||||
AllowHighFees *bool `jsonrpcdefault:"false"`
|
||||
MaxFeeRate *int32
|
||||
}
|
||||
|
||||
// NewSendRawTransactionCmd returns a new instance which can be used to issue a
|
||||
|
@ -652,6 +653,17 @@ func NewSendRawTransactionCmd(hexTx string, allowHighFees *bool) *SendRawTransac
|
|||
}
|
||||
}
|
||||
|
||||
// NewBitcoindSendRawTransactionCmd returns a new instance which can be used to issue a
|
||||
// sendrawtransaction JSON-RPC command to a bitcoind node.
|
||||
//
|
||||
// A 0 maxFeeRate indicates that a maximum fee rate won't be enforced.
|
||||
func NewBitcoindSendRawTransactionCmd(hexTx string, maxFeeRate int32) *SendRawTransactionCmd {
|
||||
return &SendRawTransactionCmd{
|
||||
HexTx: hexTx,
|
||||
MaxFeeRate: &maxFeeRate,
|
||||
}
|
||||
}
|
||||
|
||||
// SetGenerateCmd defines the setgenerate JSON-RPC command.
|
||||
type SetGenerateCmd struct {
|
||||
Generate bool
|
||||
|
|
|
@ -90,28 +90,61 @@ type SoftForkDescription struct {
|
|||
// Bip9SoftForkDescription describes the current state of a defined BIP0009
|
||||
// version bits soft-fork.
|
||||
type Bip9SoftForkDescription struct {
|
||||
Status string `json:"status"`
|
||||
Bit uint8 `json:"bit"`
|
||||
StartTime int64 `json:"startTime"`
|
||||
Timeout int64 `json:"timeout"`
|
||||
Since int32 `json:"since"`
|
||||
Status string `json:"status"`
|
||||
Bit uint8 `json:"bit"`
|
||||
StartTime1 int64 `json:"startTime"`
|
||||
StartTime2 int64 `json:"start_time"`
|
||||
Timeout int64 `json:"timeout"`
|
||||
Since int32 `json:"since"`
|
||||
}
|
||||
|
||||
// StartTime returns the starting time of the softfork as a Unix epoch.
|
||||
func (d *Bip9SoftForkDescription) StartTime() int64 {
|
||||
if d.StartTime1 != 0 {
|
||||
return d.StartTime1
|
||||
}
|
||||
return d.StartTime2
|
||||
}
|
||||
|
||||
// SoftForks describes the current softforks enabled by the backend. Softforks
|
||||
// activated through BIP9 are grouped together separate from any other softforks
|
||||
// with different activation types.
|
||||
type SoftForks struct {
|
||||
SoftForks []*SoftForkDescription `json:"softforks"`
|
||||
Bip9SoftForks map[string]*Bip9SoftForkDescription `json:"bip9_softforks"`
|
||||
}
|
||||
|
||||
// UnifiedSoftFork describes a softfork in a general manner, irrespective of
|
||||
// its activation type. This was a format introduced by bitcoind v0.19.0
|
||||
type UnifiedSoftFork struct {
|
||||
Type string `json:"type"`
|
||||
BIP9SoftForkDescription *Bip9SoftForkDescription `json:"bip9"`
|
||||
Height int32 `json:"height"`
|
||||
Active bool `json:"active"`
|
||||
}
|
||||
|
||||
// UnifiedSoftForks describes the current softforks enabled by the backend
|
||||
// in a unified manner, i.e, softforks with different activation types are
|
||||
// grouped together. This was a format introduced by bitcoind v0.19.0
|
||||
type UnifiedSoftForks struct {
|
||||
SoftForks map[string]*UnifiedSoftFork `json:"softforks"`
|
||||
}
|
||||
|
||||
// GetBlockChainInfoResult models the data returned from the getblockchaininfo
|
||||
// command.
|
||||
type GetBlockChainInfoResult struct {
|
||||
Chain string `json:"chain"`
|
||||
Blocks int32 `json:"blocks"`
|
||||
Headers int32 `json:"headers"`
|
||||
BestBlockHash string `json:"bestblockhash"`
|
||||
Difficulty float64 `json:"difficulty"`
|
||||
MedianTime int64 `json:"mediantime"`
|
||||
VerificationProgress float64 `json:"verificationprogress,omitempty"`
|
||||
Pruned bool `json:"pruned"`
|
||||
PruneHeight int32 `json:"pruneheight,omitempty"`
|
||||
ChainWork string `json:"chainwork,omitempty"`
|
||||
SoftForks []*SoftForkDescription `json:"softforks"`
|
||||
Bip9SoftForks map[string]*Bip9SoftForkDescription `json:"bip9_softforks"`
|
||||
Chain string `json:"chain"`
|
||||
Blocks int32 `json:"blocks"`
|
||||
Headers int32 `json:"headers"`
|
||||
BestBlockHash string `json:"bestblockhash"`
|
||||
Difficulty float64 `json:"difficulty"`
|
||||
MedianTime int64 `json:"mediantime"`
|
||||
VerificationProgress float64 `json:"verificationprogress,omitempty"`
|
||||
Pruned bool `json:"pruned"`
|
||||
PruneHeight int32 `json:"pruneheight,omitempty"`
|
||||
ChainWork string `json:"chainwork,omitempty"`
|
||||
*SoftForks
|
||||
*UnifiedSoftForks
|
||||
}
|
||||
|
||||
// GetBlockTemplateResultTx models the transactions field of the
|
||||
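For context on the two shapes being modeled, a small sketch (not part of the commit) that unmarshals an abbreviated bitcoind v0.19-style softforks object into the new unified type; the heights shown are the well-known mainnet activation heights for CSV and segwit.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/btcsuite/btcd/btcjson"
    )

    func main() {
        // Abbreviated getblockchaininfo "softforks" field as returned by
        // bitcoind v0.19.0 and later.
        raw := []byte(`{
            "softforks": {
                "csv":    {"type": "buried", "active": true, "height": 419328},
                "segwit": {"type": "buried", "active": true, "height": 481824}
            }
        }`)

        var forks btcjson.UnifiedSoftForks
        if err := json.Unmarshal(raw, &forks); err != nil {
            log.Fatal(err)
        }
        for name, fork := range forks.SoftForks {
            fmt.Printf("%s: type=%s active=%v height=%d\n",
                name, fork.Type, fork.Active, fork.Height)
        }
    }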
|
@ -265,6 +298,7 @@ type GetPeerInfoResult struct {
|
|||
type GetRawMempoolVerboseResult struct {
|
||||
Size int32 `json:"size"`
|
||||
Vsize int32 `json:"vsize"`
|
||||
Weight int32 `json:"weight"`
|
||||
Fee float64 `json:"fee"`
|
||||
Time int64 `json:"time"`
|
||||
Height int64 `json:"height"`
|
||||
|
@ -505,6 +539,7 @@ type TxRawResult struct {
|
|||
Hash string `json:"hash,omitempty"`
|
||||
Size int32 `json:"size,omitempty"`
|
||||
Vsize int32 `json:"vsize,omitempty"`
|
||||
Weight int32 `json:"weight,omitempty"`
|
||||
Version int32 `json:"version"`
|
||||
LockTime uint32 `json:"locktime"`
|
||||
Vin []Vin `json:"vin"`
|
||||
|
@ -523,6 +558,7 @@ type SearchRawTransactionsResult struct {
|
|||
Hash string `json:"hash"`
|
||||
Size string `json:"size"`
|
||||
Vsize string `json:"vsize"`
|
||||
Weight string `json:"weight"`
|
||||
Version int32 `json:"version"`
|
||||
LockTime uint32 `json:"locktime"`
|
||||
Vin []VinPrevOut `json:"vin"`
|
||||
|
|
|
@ -76,6 +76,9 @@ const (
|
|||
ErrRPCInvalidTxVout RPCErrorCode = -5
|
||||
ErrRPCRawTxString RPCErrorCode = -32602
|
||||
ErrRPCDecodeHexString RPCErrorCode = -22
|
||||
ErrRPCTxError RPCErrorCode = -25
|
||||
ErrRPCTxRejected RPCErrorCode = -26
|
||||
ErrRPCTxAlreadyInChain RPCErrorCode = -27
|
||||
)
|
||||
|
||||
// Errors that are specific to btcd.
|
||||
|
|
|
@ -272,6 +272,13 @@ var MainNetParams = Params{
|
|||
{343185, newHashFromStr("0000000000000000072b8bf361d01a6ba7d445dd024203fafc78768ed4368554")},
|
||||
{352940, newHashFromStr("000000000000000010755df42dba556bb72be6a32f3ce0b6941ce4430152c9ff")},
|
||||
{382320, newHashFromStr("00000000000000000a8dc6ed5b133d0eb2fd6af56203e4159789b092defd8ab2")},
|
||||
{400000, newHashFromStr("000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f")},
|
||||
{430000, newHashFromStr("000000000000000001868b2bb3a285f3cc6b33ea234eb70facf4dcdf22186b87")},
|
||||
{460000, newHashFromStr("000000000000000000ef751bbce8e744ad303c47ece06c8d863e4d417efc258c")},
|
||||
{490000, newHashFromStr("000000000000000000de069137b17b8d5a3dfbd5b145b2dcfb203f15d0c4de90")},
|
||||
{520000, newHashFromStr("0000000000000000000d26984c0229c9f6962dc74db0a6d525f2f1640396f69c")},
|
||||
{550000, newHashFromStr("000000000000000000223b7a2298fb1c6c75fb0efc28a4c56853ff4112ec6bc9")},
|
||||
{560000, newHashFromStr("0000000000000000002c7b276daf6efb2b6aa68e2ce3be67ef925b3264ae7122")},
|
||||
},
|
||||
|
||||
// Consensus rule change deployments.
|
||||
|
@ -439,6 +446,9 @@ var TestNet3Params = Params{
|
|||
{800010, newHashFromStr("000000000017ed35296433190b6829db01e657d80631d43f5983fa403bfdb4c1")},
|
||||
{900000, newHashFromStr("0000000000356f8d8924556e765b7a94aaebc6b5c8685dcfa2b1ee8b41acd89b")},
|
||||
{1000007, newHashFromStr("00000000001ccb893d8a1f25b70ad173ce955e5f50124261bbbc50379a612ddf")},
|
||||
{1100007, newHashFromStr("00000000000abc7b2cd18768ab3dee20857326a818d1946ed6796f42d66dd1e8")},
|
||||
{1200007, newHashFromStr("00000000000004f2dc41845771909db57e04191714ed8c963f7e56713a7b6cea")},
|
||||
{1300007, newHashFromStr("0000000072eab69d54df75107c052b26b0395b44f77578184293bf1bb1dbd9fa")},
|
||||
},
|
||||
|
||||
// Consensus rule change deployments.
|
||||
|
|
|
@ -253,16 +253,15 @@ func (c *Client) GetDifficulty() (float64, error) {
|
|||
|
||||
// FutureGetBlockChainInfoResult is a promise to deliver the result of a
|
||||
// GetBlockChainInfoAsync RPC invocation (or an applicable error).
|
||||
type FutureGetBlockChainInfoResult chan *response
|
||||
|
||||
// Receive waits for the response promised by the future and returns chain info
|
||||
// result provided by the server.
|
||||
func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResult, error) {
|
||||
res, err := receiveFuture(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
type FutureGetBlockChainInfoResult struct {
|
||||
client *Client
|
||||
Response chan *response
|
||||
}
|
||||
|
||||
// unmarshalPartialGetBlockChainInfoResult unmarshals the response into an
|
||||
// instance of GetBlockChainInfoResult without populating the SoftForks and
|
||||
// UnifiedSoftForks fields.
|
||||
func unmarshalPartialGetBlockChainInfoResult(res []byte) (*btcjson.GetBlockChainInfoResult, error) {
|
||||
var chainInfo btcjson.GetBlockChainInfoResult
|
||||
if err := json.Unmarshal(res, &chainInfo); err != nil {
|
||||
return nil, err
|
||||
|
@ -270,6 +269,59 @@ func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResu
|
|||
return &chainInfo, nil
|
||||
}
|
||||
|
||||
// unmarshalGetBlockChainInfoResultSoftForks properly unmarshals the softforks
|
||||
// related fields into the GetBlockChainInfoResult instance.
|
||||
func unmarshalGetBlockChainInfoResultSoftForks(chainInfo *btcjson.GetBlockChainInfoResult,
|
||||
version BackendVersion, res []byte) error {
|
||||
|
||||
switch version {
|
||||
// Versions of bitcoind on or after v0.19.0 use the unified format.
|
||||
case BitcoindPost19:
|
||||
var softForks btcjson.UnifiedSoftForks
|
||||
if err := json.Unmarshal(res, &softForks); err != nil {
|
||||
return err
|
||||
}
|
||||
chainInfo.UnifiedSoftForks = &softForks
|
||||
|
||||
// All other versions use the original format.
|
||||
default:
|
||||
var softForks btcjson.SoftForks
|
||||
if err := json.Unmarshal(res, &softForks); err != nil {
|
||||
return err
|
||||
}
|
||||
chainInfo.SoftForks = &softForks
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Receive waits for the response promised by the future and returns chain info
|
||||
// result provided by the server.
|
||||
func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResult, error) {
|
||||
res, err := receiveFuture(r.Response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chainInfo, err := unmarshalPartialGetBlockChainInfoResult(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Inspect the version to determine how we'll need to parse the
|
||||
// softforks from the response.
|
||||
version, err := r.client.BackendVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = unmarshalGetBlockChainInfoResultSoftForks(chainInfo, version, res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return chainInfo, nil
|
||||
}
|
||||
|
||||
// GetBlockChainInfoAsync returns an instance of a type that can be used to get
|
||||
// the result of the RPC at some future time by invoking the Receive function
|
||||
// on the returned instance.
|
||||
|
@ -277,7 +329,10 @@ func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResu
|
|||
// See GetBlockChainInfo for the blocking version and more details.
|
||||
func (c *Client) GetBlockChainInfoAsync() FutureGetBlockChainInfoResult {
|
||||
cmd := btcjson.NewGetBlockChainInfoCmd()
|
||||
return c.sendCmd(cmd)
|
||||
return FutureGetBlockChainInfoResult{
|
||||
client: c,
|
||||
Response: c.sendCmd(cmd),
|
||||
}
|
||||
}
|
||||
|
||||
// GetBlockChainInfo returns information related to the processing state of
|
||||
|
|
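A short usage sketch (not part of the commit): after this change, callers of GetBlockChainInfo see either SoftForks or UnifiedSoftForks populated, depending on which backend the client detected.

    package main

    import (
        "fmt"
        "log"

        "github.com/btcsuite/btcd/rpcclient"
    )

    func printForks(client *rpcclient.Client) {
        info, err := client.GetBlockChainInfo()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("chain:", info.Chain, "blocks:", info.Blocks)

        switch {
        case info.UnifiedSoftForks != nil: // bitcoind v0.19.0+
            for name, fork := range info.UnifiedSoftForks.SoftForks {
                fmt.Printf("%s active=%v\n", name, fork.Active)
            }
        case info.SoftForks != nil: // btcd and older bitcoind
            for name, fork := range info.SoftForks.Bip9SoftForks {
                fmt.Printf("%s status=%s\n", name, fork.Status)
            }
        }
    }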
|
@ -19,6 +19,7 @@ import (
|
|||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -103,6 +104,22 @@ type jsonRequest struct {
|
|||
responseChan chan *response
|
||||
}
|
||||
|
||||
// BackendVersion represents the version of the backend the client is currently
|
||||
// connected to.
|
||||
type BackendVersion uint8
|
||||
|
||||
const (
|
||||
// BitcoindPre19 represents a bitcoind version before 0.19.0.
|
||||
BitcoindPre19 BackendVersion = iota
|
||||
|
||||
// BitcoindPost19 represents a bitcoind version equal to or greater than
|
||||
// 0.19.0.
|
||||
BitcoindPost19
|
||||
|
||||
// Btcd represents a catch-all btcd version.
|
||||
Btcd
|
||||
)
|
||||
|
||||
// Client represents a Bitcoin RPC client which allows easy access to the
|
||||
// various RPC methods available on a Bitcoin RPC server. Each of the wrapper
|
||||
// functions handle the details of converting the passed and return types to and
|
||||
|
@ -129,6 +146,11 @@ type Client struct {
|
|||
// POST mode.
|
||||
httpClient *http.Client
|
||||
|
||||
// backendVersion is the version of the backend the client is currently
|
||||
// connected to. This should be retrieved through GetVersion.
|
||||
backendVersionMu sync.Mutex
|
||||
backendVersion *BackendVersion
|
||||
|
||||
// mtx is a mutex to protect access to connection related fields.
|
||||
mtx sync.Mutex
|
||||
|
||||
|
@ -659,6 +681,12 @@ out:
|
|||
log.Infof("Reestablished connection to RPC server %s",
|
||||
c.config.Host)
|
||||
|
||||
// Reset the version in case the backend was
|
||||
// disconnected due to an upgrade.
|
||||
c.backendVersionMu.Lock()
|
||||
c.backendVersion = nil
|
||||
c.backendVersionMu.Unlock()
|
||||
|
||||
// Reset the connection state and signal the reconnect
|
||||
// has happened.
|
||||
c.wsConn = wsConn
|
||||
|
@ -1332,3 +1360,84 @@ func (c *Client) Connect(tries int) error {
|
|||
// All connection attempts failed, so return the last error.
|
||||
return err
|
||||
}
|
||||
|
||||
const (
|
||||
// bitcoind19Str is the string representation of bitcoind v0.19.0.
|
||||
bitcoind19Str = "0.19.0"
|
||||
|
||||
// bitcoindVersionPrefix specifies the prefix included in every bitcoind
|
||||
// version exposed through GetNetworkInfo.
|
||||
bitcoindVersionPrefix = "/Satoshi:"
|
||||
|
||||
// bitcoindVersionSuffix specifies the suffix included in every bitcoind
|
||||
// version exposed through GetNetworkInfo.
|
||||
bitcoindVersionSuffix = "/"
|
||||
)
|
||||
|
||||
// parseBitcoindVersion parses the bitcoind version from its string
|
||||
// representation.
|
||||
func parseBitcoindVersion(version string) BackendVersion {
|
||||
// Trim the version of its prefix and suffix to determine the
|
||||
// appropriate version number.
|
||||
version = strings.TrimPrefix(
|
||||
strings.TrimSuffix(version, bitcoindVersionSuffix),
|
||||
bitcoindVersionPrefix,
|
||||
)
|
||||
switch {
|
||||
case version < bitcoind19Str:
|
||||
return BitcoindPre19
|
||||
default:
|
||||
return BitcoindPost19
|
||||
}
|
||||
}
|
||||
|
||||
// BackendVersion retrieves the version of the backend the client is currently
|
||||
// connected to.
|
||||
func (c *Client) BackendVersion() (BackendVersion, error) {
|
||||
c.backendVersionMu.Lock()
|
||||
defer c.backendVersionMu.Unlock()
|
||||
|
||||
if c.backendVersion != nil {
|
||||
return *c.backendVersion, nil
|
||||
}
|
||||
|
||||
// We'll start by calling GetInfo. This method doesn't exist for
|
||||
// bitcoind nodes as of v0.16.0, so we'll assume the client is connected
|
||||
// to a btcd backend if it does exist.
|
||||
info, err := c.GetInfo()
|
||||
|
||||
switch err := err.(type) {
|
||||
// Parse the btcd version and cache it.
|
||||
case nil:
|
||||
log.Debugf("Detected btcd version: %v", info.Version)
|
||||
version := Btcd
|
||||
c.backendVersion = &version
|
||||
return *c.backendVersion, nil
|
||||
|
||||
// Inspect the RPC error to ensure the method was not found, otherwise
|
||||
// we actually ran into an error.
|
||||
case *btcjson.RPCError:
|
||||
if err.Code != btcjson.ErrRPCMethodNotFound.Code {
|
||||
return 0, fmt.Errorf("unable to detect btcd version: "+
|
||||
"%v", err)
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("unable to detect btcd version: %v", err)
|
||||
}
|
||||
|
||||
// Since the GetInfo method was not found, we assume the client is
|
||||
// connected to a bitcoind backend, which exposes its version through
|
||||
// GetNetworkInfo.
|
||||
networkInfo, err := c.GetNetworkInfo()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("unable to detect bitcoind version: %v", err)
|
||||
}
|
||||
|
||||
// Parse the bitcoind version and cache it.
|
||||
log.Debugf("Detected bitcoind version: %v", networkInfo.SubVersion)
|
||||
version := parseBitcoindVersion(networkInfo.SubVersion)
|
||||
c.backendVersion = &version
|
||||
|
||||
return *c.backendVersion, nil
|
||||
}
|
||||
|
|
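A sketch (not part of the commit) of the detection logic from a caller's point of view; BackendVersion probes GetInfo first and falls back to GetNetworkInfo, then caches the result on the client.

    package main

    import (
        "fmt"
        "log"

        "github.com/btcsuite/btcd/rpcclient"
    )

    func describeBackend(client *rpcclient.Client) {
        version, err := client.BackendVersion()
        if err != nil {
            log.Fatal(err)
        }

        switch version {
        case rpcclient.Btcd:
            fmt.Println("connected to btcd")
        case rpcclient.BitcoindPre19:
            fmt.Println("connected to bitcoind before 0.19.0")
        case rpcclient.BitcoindPost19:
            fmt.Println("connected to bitcoind 0.19.0 or later")
        }
    }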
|
@ -244,6 +244,43 @@ func (c *Client) Ping() error {
|
|||
return c.PingAsync().Receive()
|
||||
}
|
||||
|
||||
// FutureGetNetworkInfoResult is a future promise to deliver the result of a
|
||||
// GetNetworkInfoAsync RPC invocation (or an applicable error).
|
||||
type FutureGetNetworkInfoResult chan *response
|
||||
|
||||
// Receive waits for the response promised by the future and returns data about
|
||||
// the current network.
|
||||
func (r FutureGetNetworkInfoResult) Receive() (*btcjson.GetNetworkInfoResult, error) {
|
||||
res, err := receiveFuture(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal result as an array of getpeerinfo result objects.
|
||||
var networkInfo btcjson.GetNetworkInfoResult
|
||||
err = json.Unmarshal(res, &networkInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &networkInfo, nil
|
||||
}
|
||||
|
||||
// GetNetworkInfoAsync returns an instance of a type that can be used to get the
|
||||
// result of the RPC at some future time by invoking the Receive function on the
|
||||
// returned instance.
|
||||
//
|
||||
// See GetNetworkInfo for the blocking version and more details.
|
||||
func (c *Client) GetNetworkInfoAsync() FutureGetNetworkInfoResult {
|
||||
cmd := btcjson.NewGetNetworkInfoCmd()
|
||||
return c.sendCmd(cmd)
|
||||
}
|
||||
|
||||
// GetNetworkInfo returns data about the current network.
|
||||
func (c *Client) GetNetworkInfo() (*btcjson.GetNetworkInfoResult, error) {
|
||||
return c.GetNetworkInfoAsync().Receive()
|
||||
}
|
||||
|
||||
// FutureGetPeerInfoResult is a future promise to deliver the result of a
|
||||
// GetPeerInfoAsync RPC invocation (or an applicable error).
|
||||
type FutureGetPeerInfoResult chan *response
|
||||
|
|
|
@ -15,6 +15,12 @@ import (
|
|||
"github.com/btcsuite/btcutil"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultMaxFeeRate is the default maximum fee rate in sat/KB enforced
|
||||
// by bitcoind v0.19.0 or after for transaction broadcast.
|
||||
defaultMaxFeeRate = btcutil.SatoshiPerBitcoin / 10
|
||||
)
|
||||
|
||||
// SigHashType enumerates the available signature hashing types that the
|
||||
// SignRawTransaction function accepts.
|
||||
type SigHashType string
|
||||
|
@ -296,7 +302,31 @@ func (c *Client) SendRawTransactionAsync(tx *wire.MsgTx, allowHighFees bool) Fut
|
|||
txHex = hex.EncodeToString(buf.Bytes())
|
||||
}
|
||||
|
||||
cmd := btcjson.NewSendRawTransactionCmd(txHex, &allowHighFees)
|
||||
// Due to differences in the sendrawtransaction API for different
|
||||
// backends, we'll need to inspect our version and construct the
|
||||
// appropriate request.
|
||||
version, err := c.BackendVersion()
|
||||
if err != nil {
|
||||
return newFutureError(err)
|
||||
}
|
||||
|
||||
var cmd *btcjson.SendRawTransactionCmd
|
||||
switch version {
|
||||
// Starting from bitcoind v0.19.0, the MaxFeeRate field should be used.
|
||||
case BitcoindPost19:
|
||||
// Using a 0 MaxFeeRate is interpreted as a maximum fee rate not
|
||||
// being enforced by bitcoind.
|
||||
var maxFeeRate int32
|
||||
if !allowHighFees {
|
||||
maxFeeRate = defaultMaxFeeRate
|
||||
}
|
||||
cmd = btcjson.NewBitcoindSendRawTransactionCmd(txHex, maxFeeRate)
|
||||
|
||||
// Otherwise, use the AllowHighFees field.
|
||||
default:
|
||||
cmd = btcjson.NewSendRawTransactionCmd(txHex, &allowHighFees)
|
||||
}
|
||||
|
||||
return c.sendCmd(cmd)
|
||||
}
|
||||
|
||||
|
|
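Since btcutil.SatoshiPerBitcoin is 1e8, defaultMaxFeeRate works out to 10,000,000 sat/KB (0.1 BTC/KB). From a caller's perspective nothing changes, as this sketch (not part of the commit) shows; the client picks the right parameter for the detected backend.

    package main

    import (
        "log"

        "github.com/btcsuite/btcd/rpcclient"
        "github.com/btcsuite/btcd/wire"
    )

    func broadcast(client *rpcclient.Client, tx *wire.MsgTx) {
        // allowHighFees=false: against bitcoind v0.19.0+ the client sends the
        // defaultMaxFeeRate cap instead of the removed allowhighfees flag.
        hash, err := client.SendRawTransaction(tx, false)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("broadcast %v", hash)
    }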
|
@ -321,9 +321,13 @@ func WriteMessageWithEncodingN(w io.Writer, msg Message, pver uint32,
|
|||
return totalBytes, err
|
||||
}
|
||||
|
||||
// Write payload.
|
||||
n, err = w.Write(payload)
|
||||
totalBytes += n
|
||||
// Only write the payload if there is one, e.g., verack messages don't
|
||||
// have one.
|
||||
if len(payload) > 0 {
|
||||
n, err = w.Write(payload)
|
||||
totalBytes += n
|
||||
}
|
||||
|
||||
return totalBytes, err
|
||||
}
|
||||
|
||||
|
|
|
@ -188,8 +188,8 @@ func DecodeAddress(addr string, defaultNet *chaincfg.Params) (Address, error) {
|
|||
}
|
||||
switch len(decoded) {
|
||||
case ripemd160.Size: // P2PKH or P2SH
|
||||
isP2PKH := chaincfg.IsPubKeyHashAddrID(netID)
|
||||
isP2SH := chaincfg.IsScriptHashAddrID(netID)
|
||||
isP2PKH := netID == defaultNet.PubKeyHashAddrID
|
||||
isP2SH := netID == defaultNet.ScriptHashAddrID
|
||||
switch hash160 := decoded; {
|
||||
case isP2PKH && isP2SH:
|
||||
return nil, ErrAddressCollision
|
||||
|
|
|
@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
// x -= 0x80 << 63 // Always zero.
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
|
||||
// Deprecated: do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
|
@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
|
|
|
@ -185,9 +185,25 @@ type Extension struct {
|
|||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected of when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
enc []byte
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
|
@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
|||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
|
@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
|||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.value = extensionAsStorageType(v)
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
|
@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
|
|||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
|
@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
|
|||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
||||
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
|
|||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
||||
|
||||
// extensionAsLegacyType converts an value in the storage type as the API type.
|
||||
// See Extension.value.
|
||||
func extensionAsLegacyType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
// Represent primitive types as a pointer to the value.
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// extensionAsStorageType converts an value in the API type as the storage type.
|
||||
// See Extension.value.
|
||||
func extensionAsStorageType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Represent slice types as a pointer to the value.
|
||||
if rv.Type().Elem().Kind() != reflect.Uint8 {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
|
|
@ -341,26 +341,6 @@ type Message interface {
|
|||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
|
@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion2 = true
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion1 = true
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
|
|
|
@ -36,13 +36,7 @@ package proto
|
|||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
|
@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
|
|||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
||||
return marshalMessageSet(exts, false)
|
||||
}
|
||||
|
||||
// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
|
||||
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(exts)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, exts, deterministic)
|
||||
|
||||
case map[int32]Extension:
|
||||
// This is an old-style extension map.
|
||||
// Wrap it in a new-style XXX_InternalExtensions.
|
||||
ie := XXX_InternalExtensions{
|
||||
p: &struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}{
|
||||
extensionMap: exts,
|
||||
},
|
||||
}
|
||||
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(&ie)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, &ie, deterministic)
|
||||
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
|
@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var mu sync.Locker
|
||||
m, mu = exts.extensionsRead()
|
||||
if m != nil {
|
||||
// Keep the extensions map locked until we're done marshaling to prevent
|
||||
// races between marshaling and unmarshaling the lazily-{en,de}coded
|
||||
// values.
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
}
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
if i > 0 && b.Len() > 1 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
|
|||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
|
|
|
@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
|
|||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
|
|
|
@ -38,7 +38,6 @@ package proto
|
|||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) {
|
|||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) {
|
|||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -334,9 +333,6 @@ func GetProperties(t reflect.Type) *StructProperties {
|
|||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return sprop
|
||||
}
|
||||
|
||||
|
@ -346,17 +342,20 @@ func GetProperties(t reflect.Type) *StructProperties {
|
|||
return sprop
|
||||
}
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return prop
|
||||
}
|
||||
if collectStats {
|
||||
stats.Cmiss++
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
|
@ -391,13 +390,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
var oots []interface{}
|
||||
_, _, _, oots = om.XXX_OneofFuncs()
|
||||
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
|
|
|
@ -87,6 +87,7 @@ type marshalElemInfo struct {
|
|||
sizer sizer
|
||||
marshaler marshaler
|
||||
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
|
||||
deref bool // dereference the pointer before operating on it; implies isptr
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
|
|||
|
||||
// get oneof implementers
|
||||
var oneofImplementers []interface{}
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oneofImplementers = m.XXX_OneofWrappers()
|
||||
}
|
||||
|
||||
n := t.NumField()
|
||||
|
@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
|
|||
panic("tag is not an integer")
|
||||
}
|
||||
wt := wiretype(tags[0])
|
||||
if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
|
||||
t = t.Elem()
|
||||
}
|
||||
sizer, marshaler := typeMarshaler(t, tags, false, false)
|
||||
var deref bool
|
||||
if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
|
||||
t = reflect.PtrTo(t)
|
||||
deref = true
|
||||
}
|
||||
e = &marshalElemInfo{
|
||||
wiretag: uint64(tag)<<3 | wt,
|
||||
tagsize: SizeVarint(uint64(tag) << 3),
|
||||
sizer: sizer,
|
||||
marshaler: marshaler,
|
||||
isptr: t.Kind() == reflect.Ptr,
|
||||
deref: deref,
|
||||
}
|
||||
|
||||
// update cache
|
||||
|
@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
|
|||
|
||||
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
|
||||
fi.field = toField(f)
|
||||
fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
|
||||
fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
|
||||
fi.isPointer = true
|
||||
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
|
||||
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
|
||||
|
@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
|
|||
}
|
||||
}
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
|
||||
// wiretype returns the wire encoding of the type.
|
||||
func wiretype(encoding string) uint64 {
|
||||
switch encoding {
|
||||
|
@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
|||
for _, k := range m.MapKeys() {
|
||||
ki := k.Interface()
|
||||
vi := m.MapIndex(k).Interface()
|
||||
kaddr := toAddrPointer(&ki, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
|
||||
kaddr := toAddrPointer(&ki, false, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
|
||||
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
n += siz + SizeVarint(uint64(siz)) + tagsize
|
||||
}
|
||||
|
@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
|||
for _, k := range keys {
|
||||
ki := k.Interface()
|
||||
vi := m.MapIndex(k).Interface()
|
||||
kaddr := toAddrPointer(&ki, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
|
||||
kaddr := toAddrPointer(&ki, false, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
|
||||
b = appendVarint(b, tag)
|
||||
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
b = appendVarint(b, uint64(siz))
|
||||
|
@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
|
|||
// the last time this function was called.
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
n += ei.sizer(p, ei.tagsize)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
b = append(b, 1<<3|WireEndGroup)
|
||||
if !nerr.Merge(err) {
|
||||
|
@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
n += ei.sizer(p, ei.tagsize)
|
||||
}
|
||||
return n
|
||||
|
@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
|
|||
|
||||
ei := u.getExtElemInfo(e.desc)
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
p := toAddrPointer(&v, ei.isptr, ei.deref)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
|
|
|
@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
|||
u.computeUnmarshalInfo()
|
||||
}
|
||||
if u.isMessageSet {
|
||||
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||
return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||
}
|
||||
var reqMask uint64 // bitmask of required fields we've seen.
|
||||
var errLater error
|
||||
|
@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
}
|
||||
|
||||
// Find any types associated with oneof fields.
|
||||
// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
|
||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
|
||||
if fn.IsValid() {
|
||||
res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
|
||||
for i := res.Len() - 1; i >= 0; i-- {
|
||||
v := res.Index(i) // interface{}
|
||||
tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
|
||||
typ := tptr.Elem() // Msg_X
|
||||
var oneofImplementers []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oneofImplementers = m.XXX_OneofWrappers()
|
||||
}
|
||||
for _, v := range oneofImplementers {
|
||||
tptr := reflect.TypeOf(v) // *Msg_X
|
||||
typ := tptr.Elem() // Msg_X
|
||||
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||
fieldNum, err := strconv.Atoi(tags[1])
|
||||
if err != nil {
|
||||
panic("protobuf tag field not an integer: " + tags[1])
|
||||
}
|
||||
var name string
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = strings.TrimPrefix(tag, "name=")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find the oneof field that this struct implements.
|
||||
// Might take O(n^2) to process all of the oneofs, but who cares.
|
||||
for _, of := range oneofFields {
|
||||
if tptr.Implements(of.ityp) {
|
||||
// We have found the corresponding interface for this struct.
|
||||
// That lets us know where this struct should be stored
|
||||
// when we encounter it during unmarshaling.
|
||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||
fieldNum, err := strconv.Atoi(tags[1])
|
||||
if err != nil {
|
||||
panic("protobuf tag field not an integer: " + tags[1])
|
||||
}
|
||||
var name string
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = strings.TrimPrefix(tag, "name=")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find the oneof field that this struct implements.
|
||||
// Might take O(n^2) to process all of the oneofs, but who cares.
|
||||
for _, of := range oneofFields {
|
||||
if tptr.Implements(of.ityp) {
|
||||
// We have found the corresponding interface for this struct.
|
||||
// That lets us know where this struct should be stored
|
||||
// when we encounter it during unmarshaling.
|
||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Get extension ranges, if any.
|
||||
fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||
if fn.IsValid() {
|
||||
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
|
||||
panic("a message with extensions, but no extensions field in " + t.Name())
|
||||
|
@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
|
|||
// If there is an error, it returns 0,0.
|
||||
func decodeVarint(b []byte) (uint64, int) {
|
||||
var x, y uint64
|
||||
if len(b) <= 0 {
|
||||
if len(b) == 0 {
|
||||
goto bad
|
||||
}
|
||||
x = uint64(b[0])
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -417,6 +417,17 @@ message FileOptions {
|
|||
// determining the namespace.
|
||||
optional string php_namespace = 41;
|
||||
|
||||
|
||||
// Use this option to change the namespace of php generated metadata classes.
|
||||
// Default is empty. When this option is empty, the proto file name will be used
|
||||
// for determining the namespace.
|
||||
optional string php_metadata_namespace = 44;
|
||||
|
||||
// Use this option to change the package of ruby generated classes. Default
|
||||
// is empty. When this option is not set, the package name will be used for
|
||||
// determining the ruby package.
|
||||
optional string ruby_package = 45;
|
||||
|
||||
// The parser stores options it doesn't recognize here.
|
||||
// See the documentation for the "Options" section above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/any.proto
|
||||
|
||||
package any // import "github.com/golang/protobuf/ptypes/any"
|
||||
package any
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
|
@ -16,7 +18,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
|
@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
|
@ -118,6 +121,10 @@ type Any struct {
|
|||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
|
@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} }
|
|||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_any_744b9ca530f228db, []int{0}
|
||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
||||
}
|
||||
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Any.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Any) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Any.Merge(dst, src)
|
||||
func (m *Any) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Any.Merge(m, src)
|
||||
}
|
||||
func (m *Any) XXX_Size() int {
|
||||
return xxx_messageInfo_Any.Size(m)
|
||||
|
@ -172,9 +181,9 @@ func init() {
|
|||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
|
||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
|
||||
|
||||
var fileDescriptor_any_744b9ca530f228db = []byte{
|
||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||
// 185 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||
|
|
|
@ -120,17 +120,18 @@ option objc_class_prefix = "GPB";
|
|||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
|
@ -139,6 +140,10 @@ message Any {
|
|||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
|
|
|
@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) {
|
|||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos)
|
||||
d += time.Duration(p.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/duration.proto
|
||||
|
||||
package duration // import "github.com/golang/protobuf/ptypes/duration"
|
||||
package duration
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
|
@ -16,7 +18,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
|
@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} }
|
|||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_duration_e7d612259e3f0613, []int{0}
|
||||
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
||||
}
|
||||
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Duration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Duration.Merge(dst, src)
|
||||
func (m *Duration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Duration.Merge(m, src)
|
||||
}
|
||||
func (m *Duration) XXX_Size() int {
|
||||
return xxx_messageInfo_Duration.Size(m)
|
||||
|
@ -138,11 +142,9 @@ func init() {
|
|||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
|
||||
}
|
||||
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
|
||||
|
||||
var fileDescriptor_duration_e7d612259e3f0613 = []byte{
|
||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||
// 190 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||
|
|
|
@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp {
|
|||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
|
||||
seconds := t.Unix()
|
||||
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
||||
ts := &tspb.Timestamp{
|
||||
Seconds: seconds,
|
||||
Nanos: nanos,
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/timestamp.proto
|
||||
|
||||
package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
|
||||
package timestamp
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
|
@ -16,7 +18,7 @@ var _ = math.Inf
|
|||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
|
@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required, though only UTC (as indicated by "Z") is presently supported.
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
|
@ -92,8 +96,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
|
||||
// to obtain a formatter capable of generating timestamps in this format.
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
|
@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} }
|
|||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
|
||||
return fileDescriptor_292007bbfe81227e, []int{0}
|
||||
}
|
||||
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Timestamp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Timestamp.Merge(dst, src)
|
||||
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Timestamp.Merge(m, src)
|
||||
}
|
||||
func (m *Timestamp) XXX_Size() int {
|
||||
return xxx_messageInfo_Timestamp.Size(m)
|
||||
|
@ -154,11 +160,9 @@ func init() {
|
|||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
|
||||
}
|
||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||
|
||||
var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 191 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||
|
|
|
@ -103,7 +103,9 @@ option objc_class_prefix = "GPB";
|
|||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required, though only UTC (as indicated by "Z") is presently supported.
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
|
@ -114,8 +116,8 @@ option objc_class_prefix = "GPB";
|
|||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
|
||||
// to obtain a formatter capable of generating timestamps in this format.
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
|
|
@ -8,34 +8,26 @@ addons:
|
|||
apt:
|
||||
update: true
|
||||
|
||||
env:
|
||||
matrix:
|
||||
- GOTAGS=
|
||||
- GOTAGS=libsqlite3
|
||||
- GOTAGS="sqlite_allow_uri_authority sqlite_app_armor sqlite_foreign_keys sqlite_fts5 sqlite_icu sqlite_introspect sqlite_json sqlite_secure_delete sqlite_see sqlite_stat4 sqlite_trace sqlite_userauth sqlite_vacuum_incr sqlite_vtable sqlite_unlock_notify"
|
||||
- GOTAGS=sqlite_vacuum_full
|
||||
|
||||
go:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- master
|
||||
|
||||
before_install:
|
||||
- |
|
||||
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
|
||||
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
|
||||
brew update
|
||||
fi
|
||||
- |
|
||||
go get github.com/smartystreets/goconvey
|
||||
if [[ "${GOOS}" != "windows" ]]; then
|
||||
go get github.com/mattn/goveralls
|
||||
go get golang.org/x/tools/cmd/cover
|
||||
fi
|
||||
- go get github.com/smartystreets/goconvey
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
|
||||
script:
|
||||
- GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) go build -v -tags "${GOTAGS}" .
|
||||
- |
|
||||
if [[ "${GOOS}" != "windows" ]]; then
|
||||
$HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
|
||||
go test -race -v . -tags "${GOTAGS}"
|
||||
fi
|
||||
- $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
|
||||
- go test -race -v . -tags ""
|
||||
- go test -race -v . -tags "libsqlite3"
|
||||
- go test -race -v . -tags "sqlite_allow_uri_authority sqlite_app_armor sqlite_foreign_keys sqlite_fts5 sqlite_icu sqlite_introspect sqlite_json sqlite_preupdate_hook sqlite_secure_delete sqlite_see sqlite_stat4 sqlite_trace sqlite_userauth sqlite_vacuum_incr sqlite_vtable sqlite_unlock_notify"
|
||||
- go test -race -v . -tags "sqlite_vacuum_full"
|
||||
|
|
|
@ -3,6 +3,7 @@ go-sqlite3
|
|||
|
||||
[](http://godoc.org/github.com/mattn/go-sqlite3)
|
||||
[](https://travis-ci.org/mattn/go-sqlite3)
|
||||
[](https://opencollective.com/mattn-go-sqlite3)
|
||||
[](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
|
||||
[](https://goreportcard.com/report/github.com/mattn/go-sqlite3)
|
||||
|
||||
|
@ -10,23 +11,27 @@ go-sqlite3
|
|||
|
||||
sqlite3 driver conforming to the built-in database/sql interface
|
||||
|
||||
Supported Golang version:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
Supported Golang version: See .travis.yml
|
||||
|
||||
[This package follows the official Golang Release Policy.](https://golang.org/doc/devel/release.html#policy)
|
||||
|
||||
### Overview
|
||||
|
||||
- [go-sqlite3](#go-sqlite3)
|
||||
- [Description](#description)
|
||||
- [Overview](#overview)
|
||||
- [Installation](#installation)
|
||||
- [API Reference](#api-reference)
|
||||
- [Connection String](#connection-string)
|
||||
- [DSN Examples](#dsn-examples)
|
||||
- [Features](#features)
|
||||
- [Usage](#usage)
|
||||
- [Feature / Extension List](#feature--extension-list)
|
||||
- [Compilation](#compilation)
|
||||
- [Android](#android)
|
||||
- [ARM](#arm)
|
||||
- [Cross Compile](#cross-compile)
|
||||
- [Google Cloud Platform](#google-cloud-platform)
|
||||
- [ARM](#arm)
|
||||
- [Cross Compile](#cross-compile)
|
||||
- [Google Cloud Platform](#google-cloud-platform)
|
||||
- [Linux](#linux)
|
||||
- [Alpine](#alpine)
|
||||
- [Fedora](#fedora)
|
||||
|
@ -36,11 +41,22 @@ Supported Golang version:
|
|||
- [Errors](#errors)
|
||||
- [User Authentication](#user-authentication)
|
||||
- [Compile](#compile)
|
||||
- [Usage](#usage)
|
||||
- [Usage](#usage-1)
|
||||
- [Create protected database](#create-protected-database)
|
||||
- [Password Encoding](#password-encoding)
|
||||
- [Available Encoders](#available-encoders)
|
||||
- [Restrictions](#restrictions)
|
||||
- [Support](#support)
|
||||
- [User Management](#user-management)
|
||||
- [SQL](#sql)
|
||||
- [Examples](#examples)
|
||||
- [*SQLiteConn](#sqliteconn)
|
||||
- [Attached database](#attached-database)
|
||||
- [Extensions](#extensions)
|
||||
- [Spatialite](#spatialite)
|
||||
- [FAQ](#faq)
|
||||
- [License](#license)
|
||||
- [Author](#author)
|
||||
|
||||
# Installation
|
||||
|
||||
|
@ -151,6 +167,7 @@ go build --tags "icu json1 fts5 secure_delete"
|
|||
| International Components for Unicode | sqlite_icu | This option causes the International Components for Unicode or "ICU" extension to SQLite to be added to the build |
|
||||
| Introspect PRAGMAS | sqlite_introspect | This option adds some extra PRAGMA statements. <ul><li>PRAGMA function_list</li><li>PRAGMA module_list</li><li>PRAGMA pragma_list</li></ul> |
|
||||
| JSON SQL Functions | sqlite_json | When this option is defined in the amalgamation, the JSON SQL functions are added to the build automatically |
|
||||
| Pre Update Hook | sqlite_preupdate_hook | Registers a callback function that is invoked prior to each INSERT, UPDATE, and DELETE operation on a database table. |
|
||||
| Secure Delete | sqlite_secure_delete | This compile-time option changes the default setting of the secure_delete pragma.<br><br>When this option is not used, secure_delete defaults to off. When this option is present, secure_delete defaults to on.<br><br>The secure_delete setting causes deleted content to be overwritten with zeros. There is a small performance penalty since additional I/O must occur.<br><br>On the other hand, secure_delete can prevent fragments of sensitive information from lingering in unused parts of the database file after it has been deleted. See the documentation on the secure_delete pragma for additional information |
|
||||
| Secure Delete (FAST) | sqlite_secure_delete_fast | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) |
|
||||
| Tracing / Debug | sqlite_trace | Activate trace functions |
|
||||
|
@ -249,7 +266,7 @@ Required dependency
|
|||
brew install sqlite3
|
||||
```
|
||||
|
||||
For OSX there is an additional package install which is required if you whish to build the `icu` extension.
|
||||
For OSX there is an additional package install which is required if you wish to build the `icu` extension.
|
||||
|
||||
This additional package can be installed with `homebrew`.
|
||||
|
||||
|
@ -282,7 +299,7 @@ To compile this package on Windows OS you must have the `gcc` compiler installed
|
|||
3) Open a terminal for the TDM-GCC toolchain, can be found in the Windows Start menu.
|
||||
4) Navigate to your project folder and run the `go build ...` command for this package.
|
||||
|
||||
For example the TDM-GCC Toolchain can be found [here](ttps://sourceforge.net/projects/tdm-gcc/).
|
||||
For example the TDM-GCC Toolchain can be found [here](https://sourceforge.net/projects/tdm-gcc/).
|
||||
|
||||
## Errors
|
||||
|
||||
|
@ -458,15 +475,19 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
|
|||
|
||||
Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
|
||||
|
||||
Each connection to :memory: opens a brand new in-memory sql database, so if
|
||||
Each connection to `":memory:"` opens a brand new in-memory sql database, so if
|
||||
the stdlib's sql engine happens to open another connection and you've only
|
||||
specified ":memory:", that connection will see a brand new database. A
|
||||
workaround is to use "file::memory:?mode=memory&cache=shared". Every
|
||||
connection to this string will point to the same in-memory database.
|
||||
specified `":memory:"`, that connection will see a brand new database. A
|
||||
workaround is to use `"file::memory:?cache=shared"` (or `"file:foobar?mode=memory&cache=shared"`). Every
|
||||
connection to this string will point to the same in-memory database.
|
||||
|
||||
Note that if the last database connection in the pool closes, the in-memory database is deleted. Make sure the [max idle connection limit](https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns) is > 0, and the [connection lifetime](https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime) is infinite.
|
||||
|
||||
For more information see
|
||||
* [#204](https://github.com/mattn/go-sqlite3/issues/204)
|
||||
* [#511](https://github.com/mattn/go-sqlite3/issues/511)
|
||||
* https://www.sqlite.org/sharedcache.html#shared_cache_and_in_memory_databases
|
||||
* https://www.sqlite.org/inmemorydb.html#sharedmemdb
|
||||
|
||||
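A minimal sketch of the shared in-memory workaround described above, assuming the driver is registered under the usual `sqlite3` name; the table name is made up for illustration:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Every connection drawn from this pool sees the same in-memory database,
	// because the DSN enables the shared cache.
	db, err := sql.Open("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	// Keep at least one idle connection and never expire connections, so the
	// in-memory database is not deleted when the pool drains.
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)

	if _, err := db.Exec(`CREATE TABLE demo (id INTEGER PRIMARY KEY)`); err != nil {
		log.Fatal(err)
	}
	var n int
	if err := db.QueryRow(`SELECT count(*) FROM demo`).Scan(&n); err != nil {
		log.Fatal(err)
	}
	log.Println("rows:", n)
}
```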
- Reading from database with large amount of goroutines fails on OSX.
|
||||
|
||||
|
@ -481,11 +502,11 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
|
|||
|
||||
You need to implement the feature or call the sqlite3 cli.
|
||||
|
||||
More infomation see [#305](https://github.com/mattn/go-sqlite3/issues/305)
|
||||
More information see [#305](https://github.com/mattn/go-sqlite3/issues/305)
|
||||
|
||||
- Error: `database is locked`
|
||||
|
||||
When you get an database is locked. Please use the following options.
|
||||
When you get a `database is locked` error, please use the following options.
|
||||
|
||||
Add to DSN: `cache=shared`
|
||||
|
||||
|
@ -497,11 +518,41 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
|
|||
Second, please set the maximum number of open connections of the `database/sql` package to 1.
|
||||
|
||||
```go
|
||||
db.SetMaxOpenConn(1)
|
||||
db.SetMaxOpenConns(1)
|
||||
```
|
||||
|
||||
More information see [#209](https://github.com/mattn/go-sqlite3/issues/209)
|
||||
|
||||
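A short sketch combining the two suggestions above (shared cache in the DSN plus a single open connection); the file name is made up for illustration:

```go
db, err := sql.Open("sqlite3", "file:app.db?cache=shared")
if err != nil {
	log.Fatal(err)
}
defer db.Close()
// With a single open connection, writers queue inside this process instead of
// racing for the file lock.
db.SetMaxOpenConns(1)
```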
## Contributors
|
||||
|
||||
### Code Contributors
|
||||
|
||||
This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
|
||||
<a href="https://github.com/mattn/go-sqlite3/graphs/contributors"><img src="https://opencollective.com/mattn-go-sqlite3/contributors.svg?width=890&button=false" /></a>
|
||||
|
||||
### Financial Contributors
|
||||
|
||||
Become a financial contributor and help us sustain our community. [[Contribute](https://opencollective.com/mattn-go-sqlite3/contribute)]
|
||||
|
||||
#### Individuals
|
||||
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3"><img src="https://opencollective.com/mattn-go-sqlite3/individuals.svg?width=890"></a>
|
||||
|
||||
#### Organizations
|
||||
|
||||
Support this project with your organization. Your logo will show up here with a link to your website. [[Contribute](https://opencollective.com/mattn-go-sqlite3/contribute)]
|
||||
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/0/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/0/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/1/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/1/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/2/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/2/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/3/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/3/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/4/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/4/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/5/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/5/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/6/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/6/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/7/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/7/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/8/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/8/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/mattn-go-sqlite3/organization/9/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/9/avatar.svg"></a>
|
||||
|
||||
# License
|
||||
|
||||
MIT: http://mattn.mit-license.org/2018
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
@ -25,18 +25,18 @@ type SQLiteBackup struct {
|
|||
}
|
||||
|
||||
// Backup make backup from src to dest.
|
||||
func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
|
||||
func (destConn *SQLiteConn) Backup(dest string, srcConn *SQLiteConn, src string) (*SQLiteBackup, error) {
|
||||
destptr := C.CString(dest)
|
||||
defer C.free(unsafe.Pointer(destptr))
|
||||
srcptr := C.CString(src)
|
||||
defer C.free(unsafe.Pointer(srcptr))
|
||||
|
||||
if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil {
|
||||
if b := C.sqlite3_backup_init(destConn.db, destptr, srcConn.db, srcptr); b != nil {
|
||||
bb := &SQLiteBackup{b: b}
|
||||
runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
|
||||
return bb, nil
|
||||
}
|
||||
return nil, c.lastError()
|
||||
return nil, destConn.lastError()
|
||||
}
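// copyDatabase is an illustrative sketch of a call site for the renamed
// receivers above: the destination connection is the receiver and the source
// connection is passed in. Obtaining the two raw *SQLiteConn values (for
// example via a ConnectHook on a custom driver registration) is assumed and
// omitted here; "main" is the usual schema name on both sides, and Step's
// (done, err) return shape is taken from the package API rather than shown
// in this hunk.
func copyDatabase(destConn, srcConn *SQLiteConn) error {
	backup, err := destConn.Backup("main", srcConn, "main")
	if err != nil {
		return err
	}
	defer backup.Finish()
	// Step(-1) asks SQLite to copy all remaining pages in one pass.
	_, err = backup.Step(-1)
	return err
}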
|
||||
|
||||
// Step backs up the database one step at a time by calling the underlying `sqlite3_backup_step`
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
@ -83,8 +83,22 @@ func authorizerTrampoline(handle uintptr, op int, arg1 *C.char, arg2 *C.char, ar
|
|||
return callback(op, C.GoString(arg1), C.GoString(arg2), C.GoString(arg3))
|
||||
}
|
||||
|
||||
// Use handles to avoid passing Go pointers to C.
|
||||
//export preUpdateHookTrampoline
|
||||
func preUpdateHookTrampoline(handle uintptr, dbHandle uintptr, op int, db *C.char, table *C.char, oldrowid int64, newrowid int64) {
|
||||
hval := lookupHandleVal(handle)
|
||||
data := SQLitePreUpdateData{
|
||||
Conn: hval.db,
|
||||
Op: op,
|
||||
DatabaseName: C.GoString(db),
|
||||
TableName: C.GoString(table),
|
||||
OldRowID: oldrowid,
|
||||
NewRowID: newrowid,
|
||||
}
|
||||
callback := hval.val.(func(SQLitePreUpdateData))
|
||||
callback(data)
|
||||
}
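// exampleCountChanges is an illustrative sketch of the callback shape this
// trampoline drives. The RegisterPreUpdateHook helper (available behind the
// sqlite_preupdate_hook build tag) is an assumption here, not shown in this
// hunk; the SQLitePreUpdateData fields match the struct populated above.
func exampleCountChanges(conn *SQLiteConn, counts map[string]int) {
	conn.RegisterPreUpdateHook(func(d SQLitePreUpdateData) {
		// Key by database and table; Op distinguishes INSERT, UPDATE and DELETE.
		counts[d.DatabaseName+"."+d.TableName]++
	})
}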
|
||||
|
||||
// Use handles to avoid passing Go pointers to C.
|
||||
type handleVal struct {
|
||||
db *SQLiteConn
|
||||
val interface{}
|
||||
|
@ -103,7 +117,7 @@ func newHandle(db *SQLiteConn, v interface{}) uintptr {
|
|||
return i
|
||||
}
|
||||
|
||||
func lookupHandle(handle uintptr) interface{} {
|
||||
func lookupHandleVal(handle uintptr) handleVal {
|
||||
handleLock.Lock()
|
||||
defer handleLock.Unlock()
|
||||
r, ok := handleVals[handle]
|
||||
|
@ -114,7 +128,11 @@ func lookupHandle(handle uintptr) interface{} {
|
|||
panic("invalid handle")
|
||||
}
|
||||
}
|
||||
return r.val
|
||||
return r
|
||||
}
|
||||
|
||||
func lookupHandle(handle uintptr) interface{} {
|
||||
return lookupHandleVal(handle).val
|
||||
}
|
||||
|
||||
func deleteHandles(db *SQLiteConn) {
|
||||
|
@ -368,7 +386,7 @@ func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
|
|||
func callbackError(ctx *C.sqlite3_context, err error) {
|
||||
cstr := C.CString(err.Error())
|
||||
defer C.free(unsafe.Pointer(cstr))
|
||||
C.sqlite3_result_error(ctx, cstr, -1)
|
||||
C.sqlite3_result_error(ctx, cstr, C.int(-1))
|
||||
}
|
||||
|
||||
// Test support code. Tests are not allowed to import "C", so we can't
|
||||
|
|
|
@ -0,0 +1,299 @@
|
|||
// Extracted from Go database/sql source code
|
||||
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Type conversions for Scan.
|
||||
|
||||
package sqlite3
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error
|
||||
|
||||
// convertAssign copies to dest the value in src, converting it if possible.
|
||||
// An error is returned if the copy would result in loss of information.
|
||||
// dest should be a pointer type.
|
||||
func convertAssign(dest, src interface{}) error {
|
||||
// Common cases, without reflect.
|
||||
switch s := src.(type) {
|
||||
case string:
|
||||
switch d := dest.(type) {
|
||||
case *string:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = s
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = []byte(s)
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = append((*d)[:0], s...)
|
||||
return nil
|
||||
}
|
||||
case []byte:
|
||||
switch d := dest.(type) {
|
||||
case *string:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = string(s)
|
||||
return nil
|
||||
case *interface{}:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = cloneBytes(s)
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = cloneBytes(s)
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = s
|
||||
return nil
|
||||
}
|
||||
case time.Time:
|
||||
switch d := dest.(type) {
|
||||
case *time.Time:
|
||||
*d = s
|
||||
return nil
|
||||
case *string:
|
||||
*d = s.Format(time.RFC3339Nano)
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = []byte(s.Format(time.RFC3339Nano))
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = s.AppendFormat((*d)[:0], time.RFC3339Nano)
|
||||
return nil
|
||||
}
|
||||
case nil:
|
||||
switch d := dest.(type) {
|
||||
case *interface{}:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = nil
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = nil
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = nil
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var sv reflect.Value
|
||||
|
||||
switch d := dest.(type) {
|
||||
case *string:
|
||||
sv = reflect.ValueOf(src)
|
||||
switch sv.Kind() {
|
||||
case reflect.Bool,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64:
|
||||
*d = asString(src)
|
||||
return nil
|
||||
}
|
||||
case *[]byte:
|
||||
sv = reflect.ValueOf(src)
|
||||
if b, ok := asBytes(nil, sv); ok {
|
||||
*d = b
|
||||
return nil
|
||||
}
|
||||
case *sql.RawBytes:
|
||||
sv = reflect.ValueOf(src)
|
||||
if b, ok := asBytes([]byte(*d)[:0], sv); ok {
|
||||
*d = sql.RawBytes(b)
|
||||
return nil
|
||||
}
|
||||
case *bool:
|
||||
bv, err := driver.Bool.ConvertValue(src)
|
||||
if err == nil {
|
||||
*d = bv.(bool)
|
||||
}
|
||||
return err
|
||||
case *interface{}:
|
||||
*d = src
|
||||
return nil
|
||||
}
|
||||
|
||||
if scanner, ok := dest.(sql.Scanner); ok {
|
||||
return scanner.Scan(src)
|
||||
}
|
||||
|
||||
dpv := reflect.ValueOf(dest)
|
||||
if dpv.Kind() != reflect.Ptr {
|
||||
return errors.New("destination not a pointer")
|
||||
}
|
||||
if dpv.IsNil() {
|
||||
return errNilPtr
|
||||
}
|
||||
|
||||
if !sv.IsValid() {
|
||||
sv = reflect.ValueOf(src)
|
||||
}
|
||||
|
||||
dv := reflect.Indirect(dpv)
|
||||
if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
|
||||
switch b := src.(type) {
|
||||
case []byte:
|
||||
dv.Set(reflect.ValueOf(cloneBytes(b)))
|
||||
default:
|
||||
dv.Set(sv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) {
|
||||
dv.Set(sv.Convert(dv.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// The following conversions use a string value as an intermediate representation
|
||||
// to convert between various numeric types.
|
||||
//
|
||||
// This also allows scanning into user defined types such as "type Int int64".
|
||||
// For symmetry, also check for string destination types.
|
||||
switch dv.Kind() {
|
||||
case reflect.Ptr:
|
||||
if src == nil {
|
||||
dv.Set(reflect.Zero(dv.Type()))
|
||||
return nil
|
||||
}
|
||||
dv.Set(reflect.New(dv.Type().Elem()))
|
||||
return convertAssign(dv.Interface(), src)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
s := asString(src)
|
||||
i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
|
||||
if err != nil {
|
||||
err = strconvErr(err)
|
||||
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
|
||||
}
|
||||
dv.SetInt(i64)
|
||||
return nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
s := asString(src)
|
||||
u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
|
||||
if err != nil {
|
||||
err = strconvErr(err)
|
||||
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
|
||||
}
|
||||
dv.SetUint(u64)
|
||||
return nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
s := asString(src)
|
||||
f64, err := strconv.ParseFloat(s, dv.Type().Bits())
|
||||
if err != nil {
|
||||
err = strconvErr(err)
|
||||
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
|
||||
}
|
||||
dv.SetFloat(f64)
|
||||
return nil
|
||||
case reflect.String:
|
||||
switch v := src.(type) {
|
||||
case string:
|
||||
dv.SetString(v)
|
||||
return nil
|
||||
case []byte:
|
||||
dv.SetString(string(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
|
||||
}
|
||||
|
||||
func strconvErr(err error) error {
|
||||
if ne, ok := err.(*strconv.NumError); ok {
|
||||
return ne.Err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func cloneBytes(b []byte) []byte {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
c := make([]byte, len(b))
|
||||
copy(c, b)
|
||||
return c
|
||||
}
|
||||
|
||||
func asString(src interface{}) string {
|
||||
switch v := src.(type) {
|
||||
case string:
|
||||
return v
|
||||
case []byte:
|
||||
return string(v)
|
||||
}
|
||||
rv := reflect.ValueOf(src)
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(rv.Int(), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.FormatUint(rv.Uint(), 10)
|
||||
case reflect.Float64:
|
||||
return strconv.FormatFloat(rv.Float(), 'g', -1, 64)
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(rv.Float(), 'g', -1, 32)
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(rv.Bool())
|
||||
}
|
||||
return fmt.Sprintf("%v", src)
|
||||
}
|
||||
|
||||
func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.AppendInt(buf, rv.Int(), 10), true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.AppendUint(buf, rv.Uint(), 10), true
|
||||
case reflect.Float32:
|
||||
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true
|
||||
case reflect.Float64:
|
||||
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true
|
||||
case reflect.Bool:
|
||||
return strconv.AppendBool(buf, rv.Bool()), true
|
||||
case reflect.String:
|
||||
s := rv.String()
|
||||
return append(buf, s...), true
|
||||
}
|
||||
return
|
||||
}
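// exampleConvertAssign is an illustrative sketch (not a public API) of how this
// package can use convertAssign: a TEXT value arriving from the driver as
// []byte is scanned into an int destination via the string-intermediate path
// above. The function name is made up for illustration.
func exampleConvertAssign() (int, error) {
	var n int
	err := convertAssign(&n, []byte("42"))
	return n, err
}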
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
File diff suppressed because it is too large
|
@ -124,9 +124,9 @@ extern "C" {
|
|||
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
|
||||
** [sqlite_version()] and [sqlite_source_id()].
|
||||
*/
|
||||
#define SQLITE_VERSION "3.25.2"
|
||||
#define SQLITE_VERSION_NUMBER 3025002
|
||||
#define SQLITE_SOURCE_ID "2018-09-25 19:08:10 fb90e7189ae6d62e77ba3a308ca5d683f90bbe633cf681865365b8e92792d1c7"
|
||||
#define SQLITE_VERSION "3.30.1"
|
||||
#define SQLITE_VERSION_NUMBER 3030001
|
||||
#define SQLITE_SOURCE_ID "2019-10-10 20:19:45 18db032d058f1436ce3dea84081f4ee5a0f2259ad97301d43c426bc7f3df1b0b"
|
||||
|
||||
/*
|
||||
** CAPI3REF: Run-Time Library Version Numbers
|
||||
|
@ -190,6 +190,9 @@ SQLITE_API int sqlite3_libversion_number(void);
|
|||
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
|
||||
SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
|
||||
SQLITE_API const char *sqlite3_compileoption_get(int N);
|
||||
#else
|
||||
# define sqlite3_compileoption_used(X) 0
|
||||
# define sqlite3_compileoption_get(X) ((void*)0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -824,6 +827,15 @@ struct sqlite3_io_methods {
|
|||
** file space based on this hint in order to help writes to the database
|
||||
** file run faster.
|
||||
**
|
||||
** <li>[[SQLITE_FCNTL_SIZE_LIMIT]]
|
||||
** The [SQLITE_FCNTL_SIZE_LIMIT] opcode is used by in-memory VFS that
|
||||
** implements [sqlite3_deserialize()] to set an upper bound on the size
|
||||
** of the in-memory database. The argument is a pointer to a [sqlite3_int64].
|
||||
** If the integer pointed to is negative, then it is filled in with the
|
||||
** current limit. Otherwise the limit is set to the larger of the value
|
||||
** of the integer pointed to and the current database size. The integer
|
||||
** pointed to is set to the new limit.
|
||||
**
|
||||
** <li>[[SQLITE_FCNTL_CHUNK_SIZE]]
|
||||
** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS
|
||||
** extends and truncates the database file in chunks of a size specified
|
||||
|
@ -1132,6 +1144,7 @@ struct sqlite3_io_methods {
|
|||
#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33
|
||||
#define SQLITE_FCNTL_LOCK_TIMEOUT 34
|
||||
#define SQLITE_FCNTL_DATA_VERSION 35
|
||||
#define SQLITE_FCNTL_SIZE_LIMIT 36
|
||||
|
||||
/* deprecated names */
|
||||
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
|
||||
|
@ -1284,8 +1297,14 @@ typedef struct sqlite3_api_routines sqlite3_api_routines;
|
|||
** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
|
||||
** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
|
||||
** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
|
||||
** to test whether a file is at least readable. The file can be a
|
||||
** directory.
|
||||
** to test whether a file is at least readable. The SQLITE_ACCESS_READ
|
||||
** flag is never actually used and is not implemented in the built-in
|
||||
** VFSes of SQLite. The file is named by the second argument and can be a
|
||||
** directory. The xAccess method returns [SQLITE_OK] on success or some
|
||||
** non-zero error code if there is an I/O error or if the name of
|
||||
** the file given in the second argument is illegal. If SQLITE_OK
|
||||
** is returned, then non-zero or zero is written into *pResOut to indicate
|
||||
** whether or not the file is accessible.
|
||||
**
|
||||
** ^SQLite will always allocate at least mxPathname+1 bytes for the
|
||||
** output buffer xFullPathname. The exact size of the output buffer
|
||||
|
@ -1973,6 +1992,17 @@ struct sqlite3_mem_methods {
|
|||
** negative value for this option restores the default behaviour.
|
||||
** This option is only available if SQLite is compiled with the
|
||||
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
|
||||
**
|
||||
** [[SQLITE_CONFIG_MEMDB_MAXSIZE]]
|
||||
** <dt>SQLITE_CONFIG_MEMDB_MAXSIZE
|
||||
** <dd>The SQLITE_CONFIG_MEMDB_MAXSIZE option accepts a single parameter
|
||||
** [sqlite3_int64] parameter which is the default maximum size for an in-memory
|
||||
** database created using [sqlite3_deserialize()]. This default maximum
|
||||
** size can be adjusted up or down for individual databases using the
|
||||
** [SQLITE_FCNTL_SIZE_LIMIT] [sqlite3_file_control|file-control]. If this
|
||||
** configuration setting is never used, then the default maximum is determined
|
||||
** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
|
||||
** compile-time option is not set, then the default maximum is 1073741824.
|
||||
** </dl>
|
||||
*/
|
||||
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
|
||||
|
@ -2003,6 +2033,7 @@ struct sqlite3_mem_methods {
|
|||
#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */
|
||||
#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
|
||||
#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
|
||||
#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
|
||||
|
||||
/*
|
||||
** CAPI3REF: Database Connection Configuration Options
|
||||
|
@ -2018,6 +2049,7 @@ struct sqlite3_mem_methods {
|
|||
** is invoked.
|
||||
**
|
||||
** <dl>
|
||||
** [[SQLITE_DBCONFIG_LOOKASIDE]]
|
||||
** <dt>SQLITE_DBCONFIG_LOOKASIDE</dt>
|
||||
** <dd> ^This option takes three additional arguments that determine the
|
||||
** [lookaside memory allocator] configuration for the [database connection].
|
||||
|
@ -2040,6 +2072,7 @@ struct sqlite3_mem_methods {
|
|||
** memory is in use leaves the configuration unchanged and returns
|
||||
** [SQLITE_BUSY].)^</dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_ENABLE_FKEY]]
|
||||
** <dt>SQLITE_DBCONFIG_ENABLE_FKEY</dt>
|
||||
** <dd> ^This option is used to enable or disable the enforcement of
|
||||
** [foreign key constraints]. There should be two additional arguments.
|
||||
|
@ -2050,6 +2083,7 @@ struct sqlite3_mem_methods {
|
|||
** following this call. The second parameter may be a NULL pointer, in
|
||||
** which case the FK enforcement setting is not reported back. </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_ENABLE_TRIGGER]]
|
||||
** <dt>SQLITE_DBCONFIG_ENABLE_TRIGGER</dt>
|
||||
** <dd> ^This option is used to enable or disable [CREATE TRIGGER | triggers].
|
||||
** There should be two additional arguments.
|
||||
|
@ -2060,9 +2094,21 @@ struct sqlite3_mem_methods {
|
|||
** following this call. The second parameter may be a NULL pointer, in
|
||||
** which case the trigger setting is not reported back. </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_ENABLE_VIEW]]
|
||||
** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt>
|
||||
** <dd> ^This option is used to enable or disable [CREATE VIEW | views].
|
||||
** There should be two additional arguments.
|
||||
** The first argument is an integer which is 0 to disable views,
|
||||
** positive to enable views or negative to leave the setting unchanged.
|
||||
** The second parameter is a pointer to an integer into which
|
||||
** is written 0 or 1 to indicate whether views are disabled or enabled
|
||||
** following this call. The second parameter may be a NULL pointer, in
|
||||
** which case the view setting is not reported back. </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]]
|
||||
** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt>
|
||||
** <dd> ^This option is used to enable or disable the two-argument
|
||||
** version of the [fts3_tokenizer()] function which is part of the
|
||||
** <dd> ^This option is used to enable or disable the
|
||||
** [fts3_tokenizer()] function which is part of the
|
||||
** [FTS3] full-text search engine extension.
|
||||
** There should be two additional arguments.
|
||||
** The first argument is an integer which is 0 to disable fts3_tokenizer() or
|
||||
|
@ -2073,6 +2119,7 @@ struct sqlite3_mem_methods {
|
|||
** following this call. The second parameter may be a NULL pointer, in
|
||||
** which case the new setting is not reported back. </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]]
|
||||
** <dt>SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION</dt>
|
||||
** <dd> ^This option is used to enable or disable the [sqlite3_load_extension()]
|
||||
** interface independently of the [load_extension()] SQL function.
|
||||
|
@ -2090,7 +2137,7 @@ struct sqlite3_mem_methods {
|
|||
** be a NULL pointer, in which case the new setting is not reported back.
|
||||
** </dd>
|
||||
**
|
||||
** <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
|
||||
** [[SQLITE_DBCONFIG_MAINDBNAME]] <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
|
||||
** <dd> ^This option is used to change the name of the "main" database
|
||||
** schema. ^The sole argument is a pointer to a constant UTF8 string
|
||||
** which will become the new schema name in place of "main". ^SQLite
|
||||
|
@ -2099,6 +2146,7 @@ struct sqlite3_mem_methods {
|
|||
** until after the database connection closes.
|
||||
** </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]]
|
||||
** <dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
|
||||
** <dd> Usually, when a database in wal mode is closed or detached from a
|
||||
** database handle, SQLite checks if this will mean that there are now no
|
||||
|
@ -2112,7 +2160,7 @@ struct sqlite3_mem_methods {
|
|||
** have been disabled - 0 if they are not disabled, 1 if they are.
|
||||
** </dd>
|
||||
**
|
||||
** <dt>SQLITE_DBCONFIG_ENABLE_QPSG</dt>
|
||||
** [[SQLITE_DBCONFIG_ENABLE_QPSG]] <dt>SQLITE_DBCONFIG_ENABLE_QPSG</dt>
|
||||
** <dd>^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates
|
||||
** the [query planner stability guarantee] (QPSG). When the QPSG is active,
|
||||
** a single SQL query statement will always use the same algorithm regardless
|
||||
|
@ -2128,7 +2176,7 @@ struct sqlite3_mem_methods {
|
|||
** following this call.
|
||||
** </dd>
|
||||
**
|
||||
** <dt>SQLITE_DBCONFIG_TRIGGER_EQP</dt>
|
||||
** [[SQLITE_DBCONFIG_TRIGGER_EQP]] <dt>SQLITE_DBCONFIG_TRIGGER_EQP</dt>
|
||||
** <dd> By default, the output of EXPLAIN QUERY PLAN commands does not
|
||||
** include output for any operations performed by trigger programs. This
|
||||
** option is used to set or clear (the default) a flag that governs this
|
||||
|
@ -2140,7 +2188,7 @@ struct sqlite3_mem_methods {
|
|||
** it is not disabled, 1 if it is.
|
||||
** </dd>
|
||||
**
|
||||
** <dt>SQLITE_DBCONFIG_RESET_DATABASE</dt>
|
||||
** [[SQLITE_DBCONFIG_RESET_DATABASE]] <dt>SQLITE_DBCONFIG_RESET_DATABASE</dt>
|
||||
** <dd> Set the SQLITE_DBCONFIG_RESET_DATABASE flag and then run
|
||||
** [VACUUM] in order to reset a database back to an empty database
|
||||
** with no schema and no content. The following process works even for
|
||||
|
@ -2159,6 +2207,58 @@ struct sqlite3_mem_methods {
|
|||
** Because resetting a database is destructive and irreversible, the
|
||||
** process requires the use of this obscure API and multiple steps to help
|
||||
** ensure that it does not happen by accident.
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt>
|
||||
** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the
|
||||
** "defensive" flag for a database connection. When the defensive
|
||||
** flag is enabled, language features that allow ordinary SQL to
|
||||
** deliberately corrupt the database file are disabled. The disabled
|
||||
** features include but are not limited to the following:
|
||||
** <ul>
|
||||
** <li> The [PRAGMA writable_schema=ON] statement.
|
||||
** <li> The [PRAGMA journal_mode=OFF] statement.
|
||||
** <li> Writes to the [sqlite_dbpage] virtual table.
|
||||
** <li> Direct writes to [shadow tables].
|
||||
** </ul>
|
||||
** </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_WRITABLE_SCHEMA]] <dt>SQLITE_DBCONFIG_WRITABLE_SCHEMA</dt>
|
||||
** <dd>The SQLITE_DBCONFIG_WRITABLE_SCHEMA option activates or deactivates the
|
||||
** "writable_schema" flag. This has the same effect and is logically equivalent
|
||||
** to setting [PRAGMA writable_schema=ON] or [PRAGMA writable_schema=OFF].
|
||||
** The first argument to this setting is an integer which is 0 to disable
|
||||
** the writable_schema, positive to enable writable_schema, or negative to
|
||||
** leave the setting unchanged. The second parameter is a pointer to an
|
||||
** integer into which is written 0 or 1 to indicate whether the writable_schema
|
||||
** is enabled or disabled following this call.
|
||||
** </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]]
|
||||
** <dt>SQLITE_DBCONFIG_LEGACY_ALTER_TABLE</dt>
|
||||
** <dd>The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates
|
||||
** the legacy behavior of the [ALTER TABLE RENAME] command such it
|
||||
** behaves as it did prior to [version 3.24.0] (2018-06-04). See the
|
||||
** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for
|
||||
** additional information. This feature can also be turned on and off
|
||||
** using the [PRAGMA legacy_alter_table] statement.
|
||||
** </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_DQS_DML]]
|
||||
** <dt>SQLITE_DBCONFIG_DQS_DML</td>
|
||||
** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates
|
||||
** the legacy [double-quoted string literal] misfeature for DML statement
|
||||
** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The
|
||||
** default value of this setting is determined by the [-DSQLITE_DQS]
|
||||
** compile-time option.
|
||||
** </dd>
|
||||
**
|
||||
** [[SQLITE_DBCONFIG_DQS_DDL]]
|
||||
** <dt>SQLITE_DBCONFIG_DQS_DDL</td>
|
||||
** <dd>The SQLITE_DBCONFIG_DQS option activates or deactivates
|
||||
** the legacy [double-quoted string literal] misfeature for DDL statements,
|
||||
** such as CREATE TABLE and CREATE INDEX. The
|
||||
** default value of this setting is determined by the [-DSQLITE_DQS]
|
||||
** compile-time option.
|
||||
** </dd>
|
||||
** </dl>
|
||||
*/
|
||||
|
@ -2172,7 +2272,13 @@ struct sqlite3_mem_methods {
|
|||
#define SQLITE_DBCONFIG_ENABLE_QPSG 1007 /* int int* */
|
||||
#define SQLITE_DBCONFIG_TRIGGER_EQP 1008 /* int int* */
|
||||
#define SQLITE_DBCONFIG_RESET_DATABASE 1009 /* int int* */
|
||||
#define SQLITE_DBCONFIG_MAX 1009 /* Largest DBCONFIG */
|
||||
#define SQLITE_DBCONFIG_DEFENSIVE 1010 /* int int* */
|
||||
#define SQLITE_DBCONFIG_WRITABLE_SCHEMA 1011 /* int int* */
|
||||
#define SQLITE_DBCONFIG_LEGACY_ALTER_TABLE 1012 /* int int* */
|
||||
#define SQLITE_DBCONFIG_DQS_DML 1013 /* int int* */
|
||||
#define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */
|
||||
#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */
|
||||
#define SQLITE_DBCONFIG_MAX 1015 /* Largest DBCONFIG */
|
||||
|
||||
/*
|
||||
** CAPI3REF: Enable Or Disable Extended Result Codes
|
||||
|
@ -2329,7 +2435,7 @@ SQLITE_API int sqlite3_changes(sqlite3*);
|
|||
** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
|
||||
** are not counted.
|
||||
**
|
||||
** This the [sqlite3_total_changes(D)] interface only reports the number
|
||||
** The [sqlite3_total_changes(D)] interface only reports the number
|
||||
** of rows that changed due to SQL statement run against database
|
||||
** connection D. Any changes by other database connections are ignored.
|
||||
** To detect changes against a database file from other database
|
||||
|
@ -2973,9 +3079,9 @@ SQLITE_API int sqlite3_set_authorizer(
|
|||
** time is in units of nanoseconds, however the current implementation
|
||||
** is only capable of millisecond resolution so the six least significant
|
||||
** digits in the time are meaningless. Future versions of SQLite
|
||||
** might provide greater resolution on the profiler callback. The
|
||||
** sqlite3_profile() function is considered experimental and is
|
||||
** subject to change in future versions of SQLite.
|
||||
** might provide greater resolution on the profiler callback. Invoking
|
||||
** either [sqlite3_trace()] or [sqlite3_trace_v2()] will cancel the
|
||||
** profile callback.
|
||||
*/
|
||||
SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*,
|
||||
void(*xTrace)(void*,const char*), void*);
|
||||
|
@ -3389,6 +3495,8 @@ SQLITE_API int sqlite3_open_v2(
|
|||
** is not a database file pathname pointer that SQLite passed into the xOpen
|
||||
** VFS method, then the behavior of this routine is undefined and probably
|
||||
** undesirable.
|
||||
**
|
||||
** See the [URI filename] documentation for additional information.
|
||||
*/
|
||||
SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
|
||||
SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
|
||||
|
@ -3610,9 +3718,24 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
|
|||
** on this hint by avoiding the use of [lookaside memory] so as not to
|
||||
** deplete the limited store of lookaside memory. Future versions of
|
||||
** SQLite may act on this hint differently.
|
||||
**
|
||||
** [[SQLITE_PREPARE_NORMALIZE]] <dt>SQLITE_PREPARE_NORMALIZE</dt>
|
||||
** <dd>The SQLITE_PREPARE_NORMALIZE flag is a no-op. This flag used
|
||||
** to be required for any prepared statement that wanted to use the
|
||||
** [sqlite3_normalized_sql()] interface. However, the
|
||||
** [sqlite3_normalized_sql()] interface is now available to all
|
||||
** prepared statements, regardless of whether or not they use this
|
||||
** flag.
|
||||
**
|
||||
** [[SQLITE_PREPARE_NO_VTAB]] <dt>SQLITE_PREPARE_NO_VTAB</dt>
|
||||
** <dd>The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler
|
||||
** to return an error (error code SQLITE_ERROR) if the statement uses
|
||||
** any virtual tables.
|
||||
** </dl>
|
||||
*/
|
||||
#define SQLITE_PREPARE_PERSISTENT 0x01
|
||||
#define SQLITE_PREPARE_NORMALIZE 0x02
|
||||
#define SQLITE_PREPARE_NO_VTAB 0x04
|
||||
|
||||
/*
|
||||
** CAPI3REF: Compiling An SQL Statement
|
||||
|
@ -3704,7 +3827,7 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
|
|||
** ^The specific value of WHERE-clause [parameter] might influence the
|
||||
** choice of query plan if the parameter is the left-hand side of a [LIKE]
|
||||
** or [GLOB] operator or if the parameter is compared to an indexed column
|
||||
** and the [SQLITE_ENABLE_STAT3] compile-time option is enabled.
|
||||
** and the [SQLITE_ENABLE_STAT4] compile-time option is enabled.
|
||||
** </li>
|
||||
** </ol>
|
||||
**
|
||||
|
@ -3770,6 +3893,11 @@ SQLITE_API int sqlite3_prepare16_v3(
|
|||
** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8
|
||||
** string containing the SQL text of prepared statement P with
|
||||
** [bound parameters] expanded.
|
||||
** ^The sqlite3_normalized_sql(P) interface returns a pointer to a UTF-8
|
||||
** string containing the normalized SQL text of prepared statement P. The
|
||||
** semantics used to normalize a SQL statement are unspecified and subject
|
||||
** to change. At a minimum, literal values will be replaced with suitable
|
||||
** placeholders.
|
||||
**
|
||||
** ^(For example, if a prepared statement is created using the SQL
|
||||
** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345
|
||||
|
@ -3785,14 +3913,16 @@ SQLITE_API int sqlite3_prepare16_v3(
|
|||
** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time
|
||||
** option causes sqlite3_expanded_sql() to always return NULL.
|
||||
**
|
||||
** ^The string returned by sqlite3_sql(P) is managed by SQLite and is
|
||||
** automatically freed when the prepared statement is finalized.
|
||||
** ^The strings returned by sqlite3_sql(P) and sqlite3_normalized_sql(P)
|
||||
** are managed by SQLite and are automatically freed when the prepared
|
||||
** statement is finalized.
|
||||
** ^The string returned by sqlite3_expanded_sql(P), on the other hand,
|
||||
** is obtained from [sqlite3_malloc()] and must be freed by the application
|
||||
** by passing it to [sqlite3_free()].
|
||||
*/
|
||||
SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
|
||||
SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
|
||||
SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Determine If An SQL Statement Writes The Database
|
||||
|
@ -3830,6 +3960,18 @@ SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
|
|||
*/
|
||||
SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Query The EXPLAIN Setting For A Prepared Statement
|
||||
** METHOD: sqlite3_stmt
|
||||
**
|
||||
** ^The sqlite3_stmt_isexplain(S) interface returns 1 if the
|
||||
** prepared statement S is an EXPLAIN statement, or 2 if the
|
||||
** statement S is an EXPLAIN QUERY PLAN.
|
||||
** ^The sqlite3_stmt_isexplain(S) interface returns 0 if S is
|
||||
** an ordinary statement or a NULL pointer.
|
||||
*/
|
||||
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
|
||||
** METHOD: sqlite3_stmt
|
||||
|
@ -3969,7 +4111,9 @@ typedef struct sqlite3_context sqlite3_context;
|
|||
** ^The fifth argument to the BLOB and string binding interfaces
|
||||
** is a destructor used to dispose of the BLOB or
|
||||
** string after SQLite has finished with it. ^The destructor is called
|
||||
** to dispose of the BLOB or string even if the call to bind API fails.
|
||||
** to dispose of the BLOB or string even if the call to the bind API fails,
|
||||
** except the destructor is not called if the third parameter is a NULL
|
||||
** pointer or the fourth parameter is negative.
|
||||
** ^If the fifth argument is
|
||||
** the special value [SQLITE_STATIC], then SQLite assumes that the
|
||||
** information is in static, unmanaged space and does not need to be freed.
|
||||
|
@ -4718,6 +4862,12 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
|
|||
** perform additional optimizations on deterministic functions, so use
|
||||
** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
|
||||
**
|
||||
** ^The fourth parameter may also optionally include the [SQLITE_DIRECTONLY]
|
||||
** flag, which if present prevents the function from being invoked from
|
||||
** within VIEWs or TRIGGERs. For security reasons, the [SQLITE_DIRECTONLY]
|
||||
** flag is recommended for any application-defined SQL function that has
|
||||
** side-effects.
|
||||
**
|
||||
** ^(The fifth parameter is an arbitrary pointer. The implementation of the
|
||||
** function can gain access to this pointer using [sqlite3_user_data()].)^
|
||||
**
|
||||
|
@ -4834,8 +4984,30 @@ SQLITE_API int sqlite3_create_window_function(
|
|||
** [SQLITE_UTF8 | preferred text encoding] as the fourth argument
|
||||
** to [sqlite3_create_function()], [sqlite3_create_function16()], or
|
||||
** [sqlite3_create_function_v2()].
|
||||
**
|
||||
** The SQLITE_DETERMINISTIC flag means that the new function will always
|
||||
** map the same inputs into the same output. The abs() function is
|
||||
** deterministic, for example, but randomblob() is not.
|
||||
**
|
||||
** The SQLITE_DIRECTONLY flag means that the function may only be invoked
|
||||
** from top-level SQL, and cannot be used in VIEWs or TRIGGERs. This is
|
||||
** a security feature which is recommended for all
|
||||
** [application-defined SQL functions] that have side-effects. This flag
|
||||
** prevents an attacker from adding triggers and views to a schema then
|
||||
** tricking a high-privilege application into causing unintended side-effects
|
||||
** while performing ordinary queries.
|
||||
**
|
||||
** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
|
||||
** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
|
||||
** Specifying this flag makes no difference for scalar or aggregate user
|
||||
** functions. However, if it is not specified for a user-defined window
|
||||
** function, then any sub-types belonging to arguments passed to the window
|
||||
** function may be discarded before the window function is called (i.e.
|
||||
** sqlite3_value_subtype() will always return 0).
|
||||
*/
|
||||
#define SQLITE_DETERMINISTIC 0x800
|
||||
#define SQLITE_DETERMINISTIC 0x000000800
|
||||
#define SQLITE_DIRECTONLY 0x000080000
|
||||
#define SQLITE_SUBTYPE 0x000100000
|
||||
|
||||
/*
|
||||
** CAPI3REF: Deprecated Functions
|
||||
|
@ -4886,6 +5058,8 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
|
|||
** <tr><td><b>sqlite3_value_nochange </b>
|
||||
** <td>→ <td>True if the column is unchanged in an UPDATE
|
||||
** against a virtual table.
|
||||
** <tr><td><b>sqlite3_value_frombind </b>
|
||||
** <td>→ <td>True if value originated from a [bound parameter]
|
||||
** </table></blockquote>
|
||||
**
|
||||
** <b>Details:</b>
|
||||
|
@ -4947,6 +5121,11 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
|
|||
** than within an [xUpdate] method call for an UPDATE statement, then
|
||||
** the return value is arbitrary and meaningless.
|
||||
**
|
||||
** ^The sqlite3_value_frombind(X) interface returns non-zero if the
|
||||
** value X originated from one of the [sqlite3_bind_int|sqlite3_bind()]
|
||||
** interfaces. ^If X comes from an SQL literal value, or a table column,
|
||||
** and expression, then sqlite3_value_frombind(X) returns zero.
|
||||
**
|
||||
** Please pay particular attention to the fact that the pointer returned
|
||||
** from [sqlite3_value_blob()], [sqlite3_value_text()], or
|
||||
** [sqlite3_value_text16()] can be invalidated by a subsequent call to
|
||||
|
@ -4992,6 +5171,7 @@ SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
|
|||
SQLITE_API int sqlite3_value_type(sqlite3_value*);
|
||||
SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
|
||||
SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
|
||||
SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Finding The Subtype Of SQL Values
|
||||
|
@ -5727,7 +5907,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
|
|||
** associated with database N of connection D. ^The main database file
|
||||
** has the name "main". If there is no attached database N on the database
|
||||
** connection D, or if database N is a temporary or in-memory database, then
|
||||
** a NULL pointer is returned.
|
||||
** this function will return either a NULL pointer or an empty string.
|
||||
**
|
||||
** ^The filename returned by this function is the output of the
|
||||
** xFullPathname method of the [VFS]. ^In other words, the filename
|
||||
|
@ -6282,6 +6462,9 @@ struct sqlite3_module {
|
|||
int (*xSavepoint)(sqlite3_vtab *pVTab, int);
|
||||
int (*xRelease)(sqlite3_vtab *pVTab, int);
|
||||
int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
|
||||
/* The methods above are in versions 1 and 2 of the sqlite_module object.
|
||||
** Those below are for version 3 and greater. */
|
||||
int (*xShadowName)(const char*);
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -6470,6 +6653,12 @@ struct sqlite3_index_info {
|
|||
** ^The sqlite3_create_module()
|
||||
** interface is equivalent to sqlite3_create_module_v2() with a NULL
|
||||
** destructor.
|
||||
**
|
||||
** ^If the third parameter (the pointer to the sqlite3_module object) is
|
||||
** NULL then no new module is created and any existing modules with the
|
||||
** same name are dropped.
|
||||
**
|
||||
** See also: [sqlite3_drop_modules()]
|
||||
*/
|
||||
SQLITE_API int sqlite3_create_module(
|
||||
sqlite3 *db, /* SQLite connection to register module with */
|
||||
|
@ -6485,6 +6674,23 @@ SQLITE_API int sqlite3_create_module_v2(
|
|||
void(*xDestroy)(void*) /* Module destructor function */
|
||||
);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Remove Unnecessary Virtual Table Implementations
|
||||
** METHOD: sqlite3
|
||||
**
|
||||
** ^The sqlite3_drop_modules(D,L) interface removes all virtual
|
||||
** table modules from database connection D except those named on list L.
|
||||
** The L parameter must be either NULL or a pointer to an array of pointers
|
||||
** to strings where the array is terminated by a single NULL pointer.
|
||||
** ^If the L parameter is NULL, then all virtual table modules are removed.
|
||||
**
|
||||
** See also: [sqlite3_create_module()]
|
||||
*/
|
||||
SQLITE_API int sqlite3_drop_modules(
|
||||
sqlite3 *db, /* Remove modules from this connection */
|
||||
const char **azKeep /* Except, do not remove the ones named here */
|
||||
);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Virtual Table Instance Object
|
||||
** KEYWORDS: sqlite3_vtab
|
||||
|
@ -7193,7 +7399,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
|
|||
#define SQLITE_TESTCTRL_FIRST 5
|
||||
#define SQLITE_TESTCTRL_PRNG_SAVE 5
|
||||
#define SQLITE_TESTCTRL_PRNG_RESTORE 6
|
||||
#define SQLITE_TESTCTRL_PRNG_RESET 7
|
||||
#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
|
||||
#define SQLITE_TESTCTRL_BITVEC_TEST 8
|
||||
#define SQLITE_TESTCTRL_FAULT_INSTALL 9
|
||||
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
|
||||
|
@ -7204,6 +7410,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
|
|||
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
|
||||
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
|
||||
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
|
||||
#define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17
|
||||
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
|
||||
#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
|
||||
#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19
|
||||
|
@ -7214,7 +7421,10 @@ SQLITE_API int sqlite3_test_control(int op, ...);
|
|||
#define SQLITE_TESTCTRL_SORTER_MMAP 24
|
||||
#define SQLITE_TESTCTRL_IMPOSTER 25
|
||||
#define SQLITE_TESTCTRL_PARSER_COVERAGE 26
|
||||
#define SQLITE_TESTCTRL_LAST 26 /* Largest TESTCTRL */
|
||||
#define SQLITE_TESTCTRL_RESULT_INTREAL 27
|
||||
#define SQLITE_TESTCTRL_PRNG_SEED 28
|
||||
#define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29
|
||||
#define SQLITE_TESTCTRL_LAST 29 /* Largest TESTCTRL */
|
||||
|
||||
/*
|
||||
** CAPI3REF: SQL Keyword Checking
|
||||
|
@ -8616,6 +8826,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
|
|||
** can use to customize and optimize their behavior.
|
||||
**
|
||||
** <dl>
|
||||
** [[SQLITE_VTAB_CONSTRAINT_SUPPORT]]
|
||||
** <dt>SQLITE_VTAB_CONSTRAINT_SUPPORT
|
||||
** <dd>Calls of the form
|
||||
** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
|
||||
|
@ -9385,7 +9596,7 @@ struct sqlite3_rtree_query_info {
|
|||
sqlite3_int64 iRowid; /* Rowid for current entry */
|
||||
sqlite3_rtree_dbl rParentScore; /* Score of parent node */
|
||||
int eParentWithin; /* Visibility of parent node */
|
||||
int eWithin; /* OUT: Visiblity */
|
||||
int eWithin; /* OUT: Visibility */
|
||||
sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
|
||||
/* The following fields are only available in 3.8.11 and later */
|
||||
sqlite3_value **apSqlParam; /* Original SQL values of parameters */
|
||||
|
@ -9881,12 +10092,38 @@ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
|
|||
** consecutively. There is no chance that the iterator will visit a change
|
||||
** that applies to table X, then one for table Y, and then later on visit
|
||||
** another change for table X.
|
||||
**
|
||||
** The behavior of sqlite3changeset_start_v2() and its streaming equivalent
|
||||
** may be modified by passing a combination of
|
||||
** [SQLITE_CHANGESETSTART_INVERT | supported flags] as the 4th parameter.
|
||||
**
|
||||
** Note that the sqlite3changeset_start_v2() API is still <b>experimental</b>
|
||||
** and therefore subject to change.
|
||||
*/
|
||||
SQLITE_API int sqlite3changeset_start(
|
||||
sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
|
||||
int nChangeset, /* Size of changeset blob in bytes */
|
||||
void *pChangeset /* Pointer to blob containing changeset */
|
||||
);
|
||||
SQLITE_API int sqlite3changeset_start_v2(
|
||||
sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
|
||||
int nChangeset, /* Size of changeset blob in bytes */
|
||||
void *pChangeset, /* Pointer to blob containing changeset */
|
||||
int flags /* SESSION_CHANGESETSTART_* flags */
|
||||
);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Flags for sqlite3changeset_start_v2
|
||||
**
|
||||
** The following flags may passed via the 4th parameter to
|
||||
** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]:
|
||||
**
|
||||
** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
|
||||
** Invert the changeset while iterating through it. This is equivalent to
|
||||
** inverting a changeset using sqlite3changeset_invert() before applying it.
|
||||
** It is an error to specify this flag with a patchset.
|
||||
*/
|
||||
#define SQLITE_CHANGESETSTART_INVERT 0x0002
|
||||
|
||||
|
||||
/*
|
||||
|
@ -9930,7 +10167,7 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
|
|||
** sqlite3changeset_next() is called on the iterator or until the
|
||||
** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
|
||||
** set to the number of columns in the table affected by the change. If
|
||||
** pbIncorrect is not NULL, then *pbIndirect is set to true (1) if the change
|
||||
** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
|
||||
** is an indirect change, or false (0) otherwise. See the documentation for
|
||||
** [sqlite3session_indirect()] for a description of direct and indirect
|
||||
** changes. Finally, if pOp is not NULL, then *pOp is set to one of
|
||||
|
@ -10541,7 +10778,7 @@ SQLITE_API int sqlite3changeset_apply_v2(
|
|||
),
|
||||
void *pCtx, /* First argument passed to xConflict */
|
||||
void **ppRebase, int *pnRebase, /* OUT: Rebase data */
|
||||
int flags /* Combination of SESSION_APPLY_* flags */
|
||||
int flags /* SESSION_CHANGESETAPPLY_* flags */
|
||||
);
|
||||
|
||||
/*
|
||||
|
@ -10559,8 +10796,14 @@ SQLITE_API int sqlite3changeset_apply_v2(
|
|||
** causes the sessions module to omit this savepoint. In this case, if the
|
||||
** caller has an open transaction or savepoint when apply_v2() is called,
|
||||
** it may revert the partially applied changeset by rolling it back.
|
||||
**
|
||||
** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
|
||||
** Invert the changeset before applying it. This is equivalent to inverting
|
||||
** a changeset using sqlite3changeset_invert() before applying it. It is
|
||||
** an error to specify this flag with a patchset.
|
||||
*/
|
||||
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
|
||||
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
|
||||
|
||||
/*
|
||||
** CAPI3REF: Constants Passed To The Conflict Handler
|
||||
|
@ -10791,7 +11034,7 @@ SQLITE_API int sqlite3rebaser_configure(
|
|||
** in size. This function allocates and populates a buffer with a copy
|
||||
** of the changeset rebased according to the configuration of the
|
||||
** rebaser object passed as the first argument. If successful, (*ppOut)
|
||||
** is set to point to the new buffer containing the rebased changset and
|
||||
** is set to point to the new buffer containing the rebased changeset and
|
||||
** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the
|
||||
** responsibility of the caller to eventually free the new buffer using
|
||||
** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut)
|
||||
|
@ -10954,6 +11197,12 @@ SQLITE_API int sqlite3changeset_start_strm(
|
|||
int (*xInput)(void *pIn, void *pData, int *pnData),
|
||||
void *pIn
|
||||
);
|
||||
SQLITE_API int sqlite3changeset_start_v2_strm(
|
||||
sqlite3_changeset_iter **pp,
|
||||
int (*xInput)(void *pIn, void *pData, int *pnData),
|
||||
void *pIn,
|
||||
int flags
|
||||
);
|
||||
SQLITE_API int sqlite3session_changeset_strm(
|
||||
sqlite3_session *pSession,
|
||||
int (*xOutput)(void *pOut, const void *pData, int nData),
|
||||
|
@ -10980,6 +11229,45 @@ SQLITE_API int sqlite3rebaser_rebase_strm(
|
|||
void *pOut
|
||||
);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Configure global parameters
|
||||
**
|
||||
** The sqlite3session_config() interface is used to make global configuration
|
||||
** changes to the sessions module in order to tune it to the specific needs
|
||||
** of the application.
|
||||
**
|
||||
** The sqlite3session_config() interface is not threadsafe. If it is invoked
|
||||
** while any other thread is inside any other sessions method then the
|
||||
** results are undefined. Furthermore, if it is invoked after any sessions
|
||||
** related objects have been created, the results are also undefined.
|
||||
**
|
||||
** The first argument to the sqlite3session_config() function must be one
|
||||
** of the SQLITE_SESSION_CONFIG_XXX constants defined below. The
|
||||
** interpretation of the (void*) value passed as the second parameter and
|
||||
** the effect of calling this function depends on the value of the first
|
||||
** parameter.
|
||||
**
|
||||
** <dl>
|
||||
** <dt>SQLITE_SESSION_CONFIG_STRMSIZE<dd>
|
||||
** By default, the sessions module streaming interfaces attempt to input
|
||||
** and output data in approximately 1 KiB chunks. This operand may be used
|
||||
** to set and query the value of this configuration setting. The pointer
|
||||
** passed as the second argument must point to a value of type (int).
|
||||
** If this value is greater than 0, it is used as the new streaming data
|
||||
** chunk size for both input and output. Before returning, the (int) value
|
||||
** pointed to by pArg is set to the final value of the streaming interface
|
||||
** chunk size.
|
||||
** </dl>
|
||||
**
|
||||
** This function returns SQLITE_OK if successful, or an SQLite error code
|
||||
** otherwise.
|
||||
*/
|
||||
SQLITE_API int sqlite3session_config(int op, void *pArg);
|
||||
|
||||
/*
|
||||
** CAPI3REF: Values for sqlite3session_config().
|
||||
*/
|
||||
#define SQLITE_SESSION_CONFIG_STRMSIZE 1
|
||||
|
||||
/*
|
||||
** Make sure we can call this stuff from C++.
|
||||
|
@ -11113,12 +11401,8 @@ struct Fts5PhraseIter {
|
|||
**
|
||||
** Usually, output parameter *piPhrase is set to the phrase number, *piCol
|
||||
** to the column in which it occurs and *piOff the token offset of the
|
||||
** first token of the phrase. The exception is if the table was created
|
||||
** with the offsets=0 option specified. In this case *piOff is always
|
||||
** set to -1.
|
||||
**
|
||||
** Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
|
||||
** if an error occurs.
|
||||
** first token of the phrase. Returns SQLITE_OK if successful, or an error
|
||||
** code (i.e. SQLITE_NOMEM) if an error occurs.
|
||||
**
|
||||
** This API can be quite slow if used with an FTS5 table created with the
|
||||
** "detail=none" or "detail=column" option.
|
||||
|
@ -11159,7 +11443,7 @@ struct Fts5PhraseIter {
|
|||
** Save the pointer passed as the second argument as the extension functions
|
||||
** "auxiliary data". The pointer may then be retrieved by the current or any
|
||||
** future invocation of the same fts5 extension function made as part of
|
||||
** of the same MATCH query using the xGetAuxdata() API.
|
||||
** the same MATCH query using the xGetAuxdata() API.
|
||||
**
|
||||
** Each extension function is allocated a single auxiliary data slot for
|
||||
** each FTS query (MATCH expression). If the extension function is invoked
|
||||
|
@ -11174,7 +11458,7 @@ struct Fts5PhraseIter {
|
|||
** The xDelete callback, if one is specified, is also invoked on the
|
||||
** auxiliary data pointer after the FTS5 query has finished.
|
||||
**
|
||||
** If an error (e.g. an OOM condition) occurs within this function, an
|
||||
** If an error (e.g. an OOM condition) occurs within this function,
|
||||
** the auxiliary data is set to NULL and an error code returned. If the
|
||||
** xDelete parameter was not NULL, it is invoked on the auxiliary data
|
||||
** pointer before returning.
|
||||
|
@ -11407,11 +11691,11 @@ struct Fts5ExtensionApi {
|
|||
** the tokenizer substitutes "first" for "1st" and the query works
|
||||
** as expected.
|
||||
**
|
||||
** <li> By adding multiple synonyms for a single term to the FTS index.
|
||||
** In this case, when tokenizing query text, the tokenizer may
|
||||
** provide multiple synonyms for a single term within the document.
|
||||
** FTS5 then queries the index for each synonym individually. For
|
||||
** example, faced with the query:
|
||||
** <li> By querying the index for all synonyms of each query term
|
||||
** separately. In this case, when tokenizing query text, the
|
||||
** tokenizer may provide multiple synonyms for a single term
|
||||
** within the document. FTS5 then queries the index for each
|
||||
** synonym individually. For example, faced with the query:
|
||||
**
|
||||
** <codeblock>
|
||||
** ... MATCH 'first place'</codeblock>
|
||||
|
@ -11435,7 +11719,7 @@ struct Fts5ExtensionApi {
|
|||
** "place".
|
||||
**
|
||||
** This way, even if the tokenizer does not provide synonyms
|
||||
** when tokenizing query text (it should not - to do would be
|
||||
** when tokenizing query text (it should not - to do so would be
|
||||
** inefficient), it doesn't matter if the user queries for
|
||||
** 'first + place' or '1st + place', as there are entries in the
|
||||
** FTS index corresponding to both forms of the first token.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
@ -328,7 +328,7 @@ type SQLiteRows struct {
|
|||
decltype []string
|
||||
cls bool
|
||||
closed bool
|
||||
done chan struct{}
|
||||
ctx context.Context // no better alternative to pass context into Next() method
|
||||
}
|
||||
|
||||
type functionInfo struct {
|
||||
|
@ -683,7 +683,7 @@ func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool
|
|||
ai.stepArgConverters = append(ai.stepArgConverters, conv)
|
||||
}
|
||||
if step.IsVariadic() {
|
||||
conv, err := callbackArg(t.In(start + stepNArgs).Elem())
|
||||
conv, err := callbackArg(step.In(start + stepNArgs).Elem())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -740,6 +740,8 @@ func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool
|
|||
|
||||
// AutoCommit return which currently auto commit or not.
|
||||
func (c *SQLiteConn) AutoCommit() bool {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return int(C.sqlite3_get_autocommit(c.db)) != 0
|
||||
}
|
||||
|
||||
|
@ -998,7 +1000,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
	deferForeignKeys := -1
	foreignKeys := -1
	ignoreCheckConstraints := -1
	journalMode := "DELETE"
	var journalMode string
	lockingMode := "NORMAL"
	queryOnly := -1
	recursiveTriggers := -1
|
@ -1230,7 +1232,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
|
|||
if _, ok := params["_locking"]; ok {
|
||||
pkey = "_locking"
|
||||
}
|
||||
if val := params.Get("_locking"); val != "" {
|
||||
if val := params.Get(pkey); val != "" {
|
||||
switch strings.ToUpper(val) {
|
||||
case "NORMAL", "EXCLUSIVE":
|
||||
lockingMode = strings.ToUpper(val)
|
||||
|
@ -1340,6 +1342,9 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
|
|||
mutex|C.SQLITE_OPEN_READWRITE|C.SQLITE_OPEN_CREATE,
|
||||
nil)
|
||||
if rv != 0 {
|
||||
if db != nil {
|
||||
C.sqlite3_close_v2(db)
|
||||
}
|
||||
return nil, Error{Code: ErrNo(rv)}
|
||||
}
|
||||
if db == nil {
|
||||
|
@ -1376,7 +1381,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
|
|||
// - Activate User Authentication
|
||||
// Check if the user wants to activate User Authentication.
|
||||
// If so then first create a temporary AuthConn to the database
|
||||
// This is possible because we are already succesfully authenticated.
|
||||
// This is possible because we are already successfully authenticated.
|
||||
//
|
||||
// - Check if `sqlite_user`` table exists
|
||||
// YES => Add the provided user from DSN as Admin User and
|
||||
|
@ -1387,7 +1392,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
|
|||
// Create connection to SQLite
|
||||
conn := &SQLiteConn{db: db, loc: loc, txlock: txlock}
|
||||
|
||||
// Password Cipher has to be registerd before authentication
|
||||
// Password Cipher has to be registered before authentication
|
||||
if len(authCrypt) > 0 {
|
||||
switch strings.ToUpper(authCrypt) {
|
||||
case "SHA1":
|
||||
|
@ -1517,10 +1522,10 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
|
|||
// Before going any further, we need to check that the user
|
||||
// has provided an username and password within the DSN.
|
||||
// We are not allowed to continue.
|
||||
if len(authUser) < 0 {
|
||||
if len(authUser) == 0 {
|
||||
return nil, fmt.Errorf("Missing '_auth_user' while user authentication was requested with '_auth'")
|
||||
}
|
||||
if len(authPass) < 0 {
|
||||
if len(authPass) == 0 {
|
||||
return nil, fmt.Errorf("Missing '_auth_pass' while user authentication was requested with '_auth'")
|
||||
}
|
||||
|
||||
|
@ -1566,10 +1571,11 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
	}

	// Journal Mode
	// Because default Journal Mode is DELETE this PRAGMA can always be executed.
	if err := exec(fmt.Sprintf("PRAGMA journal_mode = %s;", journalMode)); err != nil {
		C.sqlite3_close_v2(db)
		return nil, err
	if journalMode != "" {
		if err := exec(fmt.Sprintf("PRAGMA journal_mode = %s;", journalMode)); err != nil {
			C.sqlite3_close_v2(db)
			return nil, err
		}
	}

	// Locking Mode
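With this change the driver stops forcing "PRAGMA journal_mode = DELETE" on every new connection; the pragma is only issued when a journal mode was actually requested. A minimal sketch of opting into WAL from application code, assuming the _journal_mode DSN parameter exposed by this driver (the database file name is illustrative):

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	// Ask for WAL explicitly; leaving the parameter out now keeps SQLite's
	// own default instead of always resetting it to DELETE.
	db, err := sql.Open("sqlite3", "file:app.db?_journal_mode=WAL")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var mode string
	if err := db.QueryRow("PRAGMA journal_mode;").Scan(&mode); err != nil {
		log.Fatal(err)
	}
	log.Println("journal_mode:", mode) // expected: wal
}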
@ -1674,7 +1680,7 @@ func (c *SQLiteConn) prepare(ctx context.Context, query string) (driver.Stmt, er
|
|||
defer C.free(unsafe.Pointer(pquery))
|
||||
var s *C.sqlite3_stmt
|
||||
var tail *C.char
|
||||
rv := C._sqlite3_prepare_v2_internal(c.db, pquery, -1, &s, &tail)
|
||||
rv := C._sqlite3_prepare_v2_internal(c.db, pquery, C.int(-1), &s, &tail)
|
||||
if rv != C.SQLITE_OK {
|
||||
return nil, c.lastError()
|
||||
}
|
||||
|
@ -1718,7 +1724,7 @@ func (c *SQLiteConn) GetFilename(schemaName string) string {
|
|||
// GetLimit returns the current value of a run-time limit.
|
||||
// See: sqlite3_limit, http://www.sqlite.org/c3ref/limit.html
|
||||
func (c *SQLiteConn) GetLimit(id int) int {
|
||||
return int(C._sqlite3_limit(c.db, C.int(id), -1))
|
||||
return int(C._sqlite3_limit(c.db, C.int(id), C.int(-1)))
|
||||
}
|
||||
|
||||
// SetLimit changes the value of a run-time limits.
|
||||
|
@ -1841,28 +1847,13 @@ func (s *SQLiteStmt) query(ctx context.Context, args []namedValue) (driver.Rows,
|
|||
decltype: nil,
|
||||
cls: s.cls,
|
||||
closed: false,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
if ctxdone := ctx.Done(); ctxdone != nil {
|
||||
go func(db *C.sqlite3) {
|
||||
select {
|
||||
case <-ctxdone:
|
||||
select {
|
||||
case <-rows.done:
|
||||
default:
|
||||
C.sqlite3_interrupt(db)
|
||||
rows.Close()
|
||||
}
|
||||
case <-rows.done:
|
||||
}
|
||||
}(s.c.db)
|
||||
ctx: ctx,
|
||||
}
|
||||
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
// LastInsertId teturn last inserted ID.
|
||||
// LastInsertId return last inserted ID.
|
||||
func (r *SQLiteResult) LastInsertId() (int64, error) {
|
||||
return r.id, nil
|
||||
}
|
||||
|
@ -1884,29 +1875,43 @@ func (s *SQLiteStmt) Exec(args []driver.Value) (driver.Result, error) {
	return s.exec(context.Background(), list)
}

// exec executes a query that doesn't return rows. Attempts to honor context timeout.
func (s *SQLiteStmt) exec(ctx context.Context, args []namedValue) (driver.Result, error) {
	if ctx.Done() == nil {
		return s.execSync(args)
	}

	type result struct {
		r   driver.Result
		err error
	}
	resultCh := make(chan result)
	go func() {
		r, err := s.execSync(args)
		resultCh <- result{r, err}
	}()
	select {
	case rv := <-resultCh:
		return rv.r, rv.err
	case <-ctx.Done():
		select {
		case <-resultCh: // no need to interrupt
		default:
			// this is still racy and can be no-op if executed between sqlite3_* calls in execSync.
			C.sqlite3_interrupt(s.c.db)
			<-resultCh // ensure goroutine completed
		}
		return nil, ctx.Err()
	}
}

func (s *SQLiteStmt) execSync(args []namedValue) (driver.Result, error) {
	if err := s.bind(args); err != nil {
		C.sqlite3_reset(s.s)
		C.sqlite3_clear_bindings(s.s)
		return nil, err
	}

	if ctxdone := ctx.Done(); ctxdone != nil {
		done := make(chan struct{})
		defer close(done)
		go func(db *C.sqlite3) {
			select {
			case <-done:
			case <-ctxdone:
				select {
				case <-done:
				default:
					C.sqlite3_interrupt(db)
				}
			}
		}(s.c.db)
	}

	var rowid, changes C.longlong
	rv := C._sqlite3_step_row_internal(s.s, &rowid, &changes)
	if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE {
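The exec path is now split into exec/execSync: when the caller's context can expire, execSync runs in a goroutine and sqlite3_interrupt is issued on cancellation. A hedged usage sketch through the standard database/sql API (table name and timeout are illustrative only):

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "file:app.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// If the statement outlives the deadline, the driver interrupts SQLite
	// and ExecContext returns the context error.
	if _, err := db.ExecContext(ctx, "INSERT INTO events(payload) VALUES (?)", "hello"); err != nil {
		log.Println("exec failed:", err) // may be context.DeadlineExceeded
	}
}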
@ -1927,9 +1932,6 @@ func (rc *SQLiteRows) Close() error {
|
|||
return nil
|
||||
}
|
||||
rc.closed = true
|
||||
if rc.done != nil {
|
||||
close(rc.done)
|
||||
}
|
||||
if rc.cls {
|
||||
rc.s.mu.Unlock()
|
||||
return rc.s.Close()
|
||||
|
@ -1973,13 +1975,39 @@ func (rc *SQLiteRows) DeclTypes() []string {
|
|||
return rc.declTypes()
|
||||
}
|
||||
|
||||
// Next move cursor to next.
|
||||
// Next move cursor to next. Attempts to honor context timeout from QueryContext call.
|
||||
func (rc *SQLiteRows) Next(dest []driver.Value) error {
|
||||
rc.s.mu.Lock()
|
||||
defer rc.s.mu.Unlock()
|
||||
|
||||
if rc.s.closed {
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
if rc.ctx.Done() == nil {
|
||||
return rc.nextSyncLocked(dest)
|
||||
}
|
||||
resultCh := make(chan error)
|
||||
go func() {
|
||||
resultCh <- rc.nextSyncLocked(dest)
|
||||
}()
|
||||
select {
|
||||
case err := <-resultCh:
|
||||
return err
|
||||
case <-rc.ctx.Done():
|
||||
select {
|
||||
case <-resultCh: // no need to interrupt
|
||||
default:
|
||||
// this is still racy and can be no-op if executed between sqlite3_* calls in nextSyncLocked.
|
||||
C.sqlite3_interrupt(rc.s.c.db)
|
||||
<-resultCh // ensure goroutine completed
|
||||
}
|
||||
return rc.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// nextSyncLocked moves cursor to next; must be called with locked mutex.
|
||||
func (rc *SQLiteRows) nextSyncLocked(dest []driver.Value) error {
|
||||
rv := C._sqlite3_step_internal(rc.s.s)
|
||||
if rv == C.SQLITE_DONE {
|
||||
return io.EOF
|
||||
|
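Row iteration gets the same treatment: SQLiteRows now carries the context from QueryContext and Next watches it while stepping. A small sketch under the same assumptions as the previous example (db is an open *sql.DB, the schema is illustrative):

// queryWithTimeout iterates a result set under a context deadline.
func queryWithTimeout(ctx context.Context, db *sql.DB) error {
	rows, err := db.QueryContext(ctx, "SELECT id, payload FROM events")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var id int64
		var payload string
		if err := rows.Scan(&id, &payload); err != nil {
			return err
		}
	}
	// Once the deadline passes the driver interrupts the in-flight step;
	// the context error then typically surfaces here.
	return rows.Err()
}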
@ -2024,16 +2052,11 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
|
|||
case C.SQLITE_BLOB:
|
||||
p := C.sqlite3_column_blob(rc.s.s, C.int(i))
|
||||
if p == nil {
|
||||
dest[i] = nil
|
||||
dest[i] = []byte{}
|
||||
continue
|
||||
}
|
||||
n := int(C.sqlite3_column_bytes(rc.s.s, C.int(i)))
|
||||
switch dest[i].(type) {
|
||||
default:
|
||||
slice := make([]byte, n)
|
||||
copy(slice[:], (*[1 << 30]byte)(p)[0:n])
|
||||
dest[i] = slice
|
||||
}
|
||||
n := C.sqlite3_column_bytes(rc.s.s, C.int(i))
|
||||
dest[i] = C.GoBytes(p, n)
|
||||
case C.SQLITE_NULL:
|
||||
dest[i] = nil
|
||||
case C.SQLITE_TEXT:
|
||||
|
@ -2062,9 +2085,8 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
|
|||
}
|
||||
dest[i] = t
|
||||
default:
|
||||
dest[i] = []byte(s)
|
||||
dest[i] = s
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return nil
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
|
||||
// This file provides several different implementations for the
|
||||
// default embedded sqlite_crypt function.
|
||||
// This function is uses a ceasar-cypher by default
|
||||
// This function is uses a caesar-cypher by default
|
||||
// and is used within the UserAuthentication module to encode
|
||||
// the password.
|
||||
//
|
||||
|
@ -40,7 +40,7 @@ import (
|
|||
// password X, sqlite_crypt(X,NULL) is run. A new random salt is selected
|
||||
// when the second argument is NULL.
|
||||
//
|
||||
// The built-in version of of sqlite_crypt() uses a simple Ceasar-cypher
|
||||
// The built-in version of of sqlite_crypt() uses a simple Caesar-cypher
|
||||
// which prevents passwords from being revealed by searching the raw database
|
||||
// for ASCII text, but is otherwise trivally broken. For better password
|
||||
// security, the database should be encrypted using the SQLite Encryption
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
@ -10,7 +10,6 @@ package sqlite3

import (
	"database/sql/driver"
	"errors"

	"context"
)

@ -18,7 +17,8 @@ import (
// Ping implement Pinger.
func (c *SQLiteConn) Ping(ctx context.Context) error {
	if c.db == nil {
		return errors.New("Connection was closed")
		// must be ErrBadConn for sql to close the database
		return driver.ErrBadConn
	}
	return nil
}
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
@ -11,6 +11,7 @@ package sqlite3
|
|||
#cgo CFLAGS: -DUSE_LIBSQLITE3
|
||||
#cgo linux LDFLAGS: -lsqlite3
|
||||
#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
|
||||
#cgo darwin CFLAGS: -I/usr/local/opt/sqlite/include
|
||||
#cgo openbsd LDFLAGS: -lsqlite3
|
||||
#cgo solaris LDFLAGS: -lsqlite3
|
||||
*/
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -0,0 +1,20 @@
// Copyright (C) 2019 G.J.R. Timmer <gjr.timmer@gmail.com>.
// Copyright (C) 2018 segment.com <friends@segment.com>
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build cgo

package sqlite3

// SQLitePreUpdateData represents all of the data available during a
// pre-update hook call.
type SQLitePreUpdateData struct {
	Conn         *SQLiteConn
	Op           int
	DatabaseName string
	TableName    string
	OldRowID     int64
	NewRowID     int64
}
|
@ -0,0 +1,112 @@
|
|||
// Copyright (C) 2019 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
// Copyright (C) 2018 segment.com <friends@segment.com>
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build sqlite_preupdate_hook
|
||||
|
||||
package sqlite3
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -DSQLITE_ENABLE_PREUPDATE_HOOK
|
||||
#cgo LDFLAGS: -lm
|
||||
|
||||
#ifndef USE_LIBSQLITE3
|
||||
#include <sqlite3-binding.h>
|
||||
#else
|
||||
#include <sqlite3.h>
|
||||
#endif
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
void preUpdateHookTrampoline(void*, sqlite3 *, int, char *, char *, sqlite3_int64, sqlite3_int64);
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"errors"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// RegisterPreUpdateHook sets the pre-update hook for a connection.
|
||||
//
|
||||
// The callback is passed a SQLitePreUpdateData struct with the data for
|
||||
// the update, as well as methods for fetching copies of impacted data.
|
||||
//
|
||||
// If there is an existing update hook for this connection, it will be
|
||||
// removed. If callback is nil the existing hook (if any) will be removed
|
||||
// without creating a new one.
|
||||
func (c *SQLiteConn) RegisterPreUpdateHook(callback func(SQLitePreUpdateData)) {
|
||||
if callback == nil {
|
||||
C.sqlite3_preupdate_hook(c.db, nil, nil)
|
||||
} else {
|
||||
C.sqlite3_preupdate_hook(c.db, (*[0]byte)(unsafe.Pointer(C.preUpdateHookTrampoline)), unsafe.Pointer(newHandle(c, callback)))
|
||||
}
|
||||
}
|
||||
|
||||
// Depth returns the source path of the write, see sqlite3_preupdate_depth()
|
||||
func (d *SQLitePreUpdateData) Depth() int {
|
||||
return int(C.sqlite3_preupdate_depth(d.Conn.db))
|
||||
}
|
||||
|
||||
// Count returns the number of columns in the row
|
||||
func (d *SQLitePreUpdateData) Count() int {
|
||||
return int(C.sqlite3_preupdate_count(d.Conn.db))
|
||||
}
|
||||
|
||||
func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error {
|
||||
for i := 0; i < d.Count() && i < len(dest); i++ {
|
||||
var val *C.sqlite3_value
|
||||
var src interface{}
|
||||
|
||||
// Initially I tried making this just a function pointer argument, but
|
||||
// it's absurdly complicated to pass C function pointers.
|
||||
if new {
|
||||
C.sqlite3_preupdate_new(d.Conn.db, C.int(i), &val)
|
||||
} else {
|
||||
C.sqlite3_preupdate_old(d.Conn.db, C.int(i), &val)
|
||||
}
|
||||
|
||||
switch C.sqlite3_value_type(val) {
|
||||
case C.SQLITE_INTEGER:
|
||||
src = int64(C.sqlite3_value_int64(val))
|
||||
case C.SQLITE_FLOAT:
|
||||
src = float64(C.sqlite3_value_double(val))
|
||||
case C.SQLITE_BLOB:
|
||||
len := C.sqlite3_value_bytes(val)
|
||||
blobptr := C.sqlite3_value_blob(val)
|
||||
src = C.GoBytes(blobptr, len)
|
||||
case C.SQLITE_TEXT:
|
||||
len := C.sqlite3_value_bytes(val)
|
||||
cstrptr := unsafe.Pointer(C.sqlite3_value_text(val))
|
||||
src = C.GoBytes(cstrptr, len)
|
||||
case C.SQLITE_NULL:
|
||||
src = nil
|
||||
}
|
||||
|
||||
err := convertAssign(&dest[i], src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Old populates dest with the row data to be replaced. This works similar to
|
||||
// database/sql's Rows.Scan()
|
||||
func (d *SQLitePreUpdateData) Old(dest ...interface{}) error {
|
||||
if d.Op == SQLITE_INSERT {
|
||||
return errors.New("There is no old row for INSERT operations")
|
||||
}
|
||||
return d.row(dest, false)
|
||||
}
|
||||
|
||||
// New populates dest with the replacement row data. This works similar to
|
||||
// database/sql's Rows.Scan()
|
||||
func (d *SQLitePreUpdateData) New(dest ...interface{}) error {
|
||||
if d.Op == SQLITE_DELETE {
|
||||
return errors.New("There is no new row for DELETE operations")
|
||||
}
|
||||
return d.row(dest, true)
|
||||
}
|
|
@ -0,0 +1,21 @@
// Copyright (C) 2019 G.J.R. Timmer <gjr.timmer@gmail.com>.
// Copyright (C) 2018 segment.com <friends@segment.com>
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build !sqlite_preupdate_hook,cgo

package sqlite3

// RegisterPreUpdateHook sets the pre-update hook for a connection.
//
// The callback is passed a SQLitePreUpdateData struct with the data for
// the update, as well as methods for fetching copies of impacted data.
//
// If there is an existing update hook for this connection, it will be
// removed. If callback is nil the existing hook (if any) will be removed
// without creating a new one.
func (c *SQLiteConn) RegisterPreUpdateHook(callback func(SQLitePreUpdateData)) {
	// NOOP
}
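These two files are build-tag variants of the same API: the cgo implementation above is compiled only with -tags sqlite_preupdate_hook, otherwise this no-op version is used. A hedged sketch of wiring the hook up through the driver's ConnectHook (driver name and database path are illustrative):

package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	sql.Register("sqlite3_preupdate", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			conn.RegisterPreUpdateHook(func(d sqlite3.SQLitePreUpdateData) {
				log.Printf("op=%d db=%s table=%s old=%d new=%d",
					d.Op, d.DatabaseName, d.TableName, d.OldRowID, d.NewRowID)
			})
			return nil
		},
	})

	db, err := sql.Open("sqlite3_preupdate", "file:app.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// With the sqlite_preupdate_hook build tag, every INSERT/UPDATE/DELETE
	// on this connection fires the callback before the change is applied.
}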
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2018 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2018 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2016 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
@ -89,6 +89,7 @@ func fillExpandedSQL(info *TraceInfo, db *C.sqlite3, pStmt unsafe.Pointer) {
|
|||
}
|
||||
|
||||
expSQLiteCStr := C.sqlite3_expanded_sql((*C.sqlite3_stmt)(pStmt))
|
||||
defer C.sqlite3_free(unsafe.Pointer(expSQLiteCStr))
|
||||
if expSQLiteCStr == nil {
|
||||
fillDBError(&info.DBError, db)
|
||||
return
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sqlite3
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
|
|
@ -311,12 +311,20 @@ struct sqlite3_api_routines {
|
|||
int (*str_errcode)(sqlite3_str*);
|
||||
int (*str_length)(sqlite3_str*);
|
||||
char *(*str_value)(sqlite3_str*);
|
||||
/* Version 3.25.0 and later */
|
||||
int (*create_window_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void (*xValue)(sqlite3_context*),
|
||||
void (*xInv)(sqlite3_context*,int,sqlite3_value**),
|
||||
void(*xDestroy)(void*));
|
||||
/* Version 3.26.0 and later */
|
||||
const char *(*normalized_sql)(sqlite3_stmt*);
|
||||
/* Version 3.28.0 and later */
|
||||
int (*stmt_isexplain)(sqlite3_stmt*);
|
||||
int (*value_frombind)(sqlite3_value*);
|
||||
/* Version 3.30.0 and later */
|
||||
int (*drop_modules)(sqlite3*,const char**);
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -604,6 +612,13 @@ typedef int (*sqlite3_loadext_entry)(
|
|||
#define sqlite3_str_value sqlite3_api->str_value
|
||||
/* Version 3.25.0 and later */
|
||||
#define sqlite3_create_window_function sqlite3_api->create_window_function
|
||||
/* Version 3.26.0 and later */
|
||||
#define sqlite3_normalized_sql sqlite3_api->normalized_sql
|
||||
/* Version 3.28.0 and later */
|
||||
#define sqlite3_stmt_isexplain sqlite3_api->isexplain
|
||||
#define sqlite3_value_frombind sqlite3_api->frombind
|
||||
/* Version 3.30.0 and later */
|
||||
#define sqlite3_drop_modules sqlite3_api->drop_modules
|
||||
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
|
||||
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !cgo
|
||||
|
||||
package sqlite3
|
||||
|
|
|
@ -1,10 +1,14 @@
|
|||
language: go
|
||||
go_import_path: github.com/pkg/errors
|
||||
go:
|
||||
- 1.4.3
|
||||
- 1.5.4
|
||||
- 1.6.2
|
||||
- 1.7.1
|
||||
- 1.4.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- tip
|
||||
|
||||
script:
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors)
|
||||
# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors) [](https://sourcegraph.com/github.com/pkg/errors?badge)
|
||||
|
||||
Package errors provides simple error handling primitives.
|
||||
|
||||
|
@ -47,6 +47,6 @@ We welcome pull requests, bug fixes and issue reports. With that said, the bar f
|
|||
|
||||
Before proposing a change, please discuss your change by raising an issue.
|
||||
|
||||
## Licence
|
||||
## License
|
||||
|
||||
BSD-2-Clause
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
// return err
|
||||
// }
|
||||
//
|
||||
// which applied recursively up the call stack results in error reports
|
||||
// which when applied recursively up the call stack results in error reports
|
||||
// without context or debugging information. The errors package allows
|
||||
// programmers to add context to the failure path in their code in a way
|
||||
// that does not destroy the original value of the error.
|
||||
|
@ -15,16 +15,17 @@
|
|||
//
|
||||
// The errors.Wrap function returns a new error that adds context to the
|
||||
// original error by recording a stack trace at the point Wrap is called,
|
||||
// and the supplied message. For example
|
||||
// together with the supplied message. For example
|
||||
//
|
||||
// _, err := ioutil.ReadAll(r)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "read failed")
|
||||
// }
|
||||
//
|
||||
// If additional control is required the errors.WithStack and errors.WithMessage
|
||||
// functions destructure errors.Wrap into its component operations of annotating
|
||||
// an error with a stack trace and an a message, respectively.
|
||||
// If additional control is required, the errors.WithStack and
|
||||
// errors.WithMessage functions destructure errors.Wrap into its component
|
||||
// operations: annotating an error with a stack trace and with a message,
|
||||
// respectively.
|
||||
//
|
||||
// Retrieving the cause of an error
|
||||
//
|
||||
|
@ -38,7 +39,7 @@
|
|||
// }
|
||||
//
|
||||
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
|
||||
// the topmost error which does not implement causer, which is assumed to be
|
||||
// the topmost error that does not implement causer, which is assumed to be
|
||||
// the original cause. For example:
|
||||
//
|
||||
// switch err := errors.Cause(err).(type) {
|
||||
|
@ -48,16 +49,16 @@
|
|||
// // unknown error
|
||||
// }
|
||||
//
|
||||
// causer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
// Although the causer interface is not exported by this package, it is
|
||||
// considered a part of its stable public interface.
|
||||
//
|
||||
// Formatted printing of errors
|
||||
//
|
||||
// All error values returned from this package implement fmt.Formatter and can
|
||||
// be formatted by the fmt package. The following verbs are supported
|
||||
// be formatted by the fmt package. The following verbs are supported:
|
||||
//
|
||||
// %s print the error. If the error has a Cause it will be
|
||||
// printed recursively
|
||||
// printed recursively.
|
||||
// %v see %s
|
||||
// %+v extended format. Each Frame of the error's StackTrace will
|
||||
// be printed in detail.
|
||||
|
@ -65,13 +66,13 @@
|
|||
// Retrieving the stack trace of an error or wrapper
|
||||
//
|
||||
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
|
||||
// invoked. This information can be retrieved with the following interface.
|
||||
// invoked. This information can be retrieved with the following interface:
|
||||
//
|
||||
// type stackTracer interface {
|
||||
// StackTrace() errors.StackTrace
|
||||
// }
|
||||
//
|
||||
// Where errors.StackTrace is defined as
|
||||
// The returned errors.StackTrace type is defined as
|
||||
//
|
||||
// type StackTrace []Frame
|
||||
//
|
||||
|
@ -85,8 +86,8 @@
|
|||
// }
|
||||
// }
|
||||
//
|
||||
// stackTracer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
// Although the stackTracer interface is not exported by this package, it is
|
||||
// considered a part of its stable public interface.
|
||||
//
|
||||
// See the documentation for Frame.Format for more details.
|
||||
package errors
|
||||
|
@ -192,7 +193,7 @@ func Wrap(err error, message string) error {
|
|||
}
|
||||
|
||||
// Wrapf returns an error annotating err with a stack trace
|
||||
// at the point Wrapf is call, and the format specifier.
|
||||
// at the point Wrapf is called, and the format specifier.
|
||||
// If err is nil, Wrapf returns nil.
|
||||
func Wrapf(err error, format string, args ...interface{}) error {
|
||||
if err == nil {
|
||||
|
@ -220,6 +221,18 @@ func WithMessage(err error, message string) error {
	}
}

// WithMessagef annotates err with the format specifier.
// If err is nil, WithMessagef returns nil.
func WithMessagef(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
}

type withMessage struct {
	cause error
	msg   string
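WithMessagef is the formatted counterpart of WithMessage: it annotates without capturing a new stack trace, while Wrap/Wrapf do record one. A short usage sketch (file name and message text are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func loadConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// Wrapf records a stack trace at this point.
		return errors.Wrapf(err, "loading config %q", path)
	}
	return nil
}

func main() {
	// WithMessagef only prepends a formatted message to the existing error.
	err := errors.WithMessagef(loadConfig("missing.conf"), "startup attempt %d", 1)
	fmt.Printf("%v\n", err)               // full message chain
	fmt.Printf("%v\n", errors.Cause(err)) // the original *os.PathError
}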
|
|
@ -46,7 +46,8 @@ func (f Frame) line() int {
|
|||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+s path of source file relative to the compile time GOPATH
|
||||
// %+s function name and path of source file relative to the compile time
|
||||
// GOPATH separated by \n\t (<funcname>\n\t<path>)
|
||||
// %+v equivalent to %+s:%d
|
||||
func (f Frame) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
|
@ -79,6 +80,14 @@ func (f Frame) Format(s fmt.State, verb rune) {
|
|||
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
|
||||
type StackTrace []Frame
|
||||
|
||||
// Format formats the stack of Frames according to the fmt.Formatter interface.
|
||||
//
|
||||
// %s lists source files for each Frame in the stack
|
||||
// %v lists the source file and line number for each Frame in the stack
|
||||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+v Prints filename, function, and line number for each Frame in the stack.
|
||||
func (st StackTrace) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
|
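The new doc comment above describes how a StackTrace renders under the fmt verbs. A minimal sketch of reaching the trace through the (unexported but stable) stackTracer contract described earlier in this file:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	err := errors.New("boom") // New records a stack trace at this call site
	if st, ok := err.(interface{ StackTrace() errors.StackTrace }); ok {
		fmt.Printf("%+v\n", st) // function, file and line for every frame
	}
}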
@ -136,43 +145,3 @@ func funcname(name string) string {
|
|||
i = strings.Index(name, ".")
|
||||
return name[i+1:]
|
||||
}
|
||||
|
||||
func trimGOPATH(name, file string) string {
|
||||
// Here we want to get the source file path relative to the compile time
|
||||
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
|
||||
// GOPATH at runtime, but we can infer the number of path segments in the
|
||||
// GOPATH. We note that fn.Name() returns the function name qualified by
|
||||
// the import path, which does not include the GOPATH. Thus we can trim
|
||||
// segments from the beginning of the file path until the number of path
|
||||
// separators remaining is one more than the number of path separators in
|
||||
// the function name. For example, given:
|
||||
//
|
||||
// GOPATH /home/user
|
||||
// file /home/user/src/pkg/sub/file.go
|
||||
// fn.Name() pkg/sub.Type.Method
|
||||
//
|
||||
// We want to produce:
|
||||
//
|
||||
// pkg/sub/file.go
|
||||
//
|
||||
// From this we can easily see that fn.Name() has one less path separator
|
||||
// than our desired output. We count separators from the end of the file
|
||||
// path until it finds two more than in the function name and then move
|
||||
// one character forward to preserve the initial path segment without a
|
||||
// leading separator.
|
||||
const sep = "/"
|
||||
goal := strings.Count(name, sep) + 2
|
||||
i := len(file)
|
||||
for n := 0; n < goal; n++ {
|
||||
i = strings.LastIndex(file[:i], sep)
|
||||
if i == -1 {
|
||||
// not enough separators found, set i so that the slice expression
|
||||
// below leaves file unmodified
|
||||
i = -len(sep)
|
||||
break
|
||||
}
|
||||
}
|
||||
// get back to 0 or trim the leading separator
|
||||
file = file[i+len(sep):]
|
||||
return file
|
||||
}
|
||||
|
|
|
@ -1,51 +1,25 @@
|
|||
language: go
|
||||
go_import_path: github.com/sirupsen/logrus
|
||||
git:
|
||||
depth: 1
|
||||
env:
|
||||
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||
- GO111MODULE=on
|
||||
- GO111MODULE=off
|
||||
go: [ 1.11.x, 1.12.x ]
|
||||
os: [ linux, osx ]
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.10.x
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get golang.org/x/crypto/ssh/terminal
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
- go: 1.11.x
|
||||
env: GO111MODULE=on
|
||||
install:
|
||||
- go mod download
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
- go: 1.11.x
|
||||
exclude:
|
||||
- go: 1.12.x
|
||||
env: GO111MODULE=off
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get golang.org/x/crypto/ssh/terminal
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
- go: 1.10.x
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get golang.org/x/crypto/ssh/terminal
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v -tags appengine ./...
|
||||
- go: 1.11.x
|
||||
env: GO111MODULE=on
|
||||
install:
|
||||
- go mod download
|
||||
script:
|
||||
- go test -race -v -tags appengine ./...
|
||||
- go: 1.11.x
|
||||
env: GO111MODULE=off
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get golang.org/x/crypto/ssh/terminal
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v -tags appengine ./...
|
||||
os: osx
|
||||
install:
|
||||
- ./travis/install.sh
|
||||
- if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi
|
||||
- if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi
|
||||
script:
|
||||
- ./travis/cross_build.sh
|
||||
- export GOMAXPROCS=4
|
||||
- export GORACE=halt_on_error=1
|
||||
- go test -race -v ./...
|
||||
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
|
||||
|
|
|
@ -1,3 +1,38 @@
|
|||
# 1.4.2
|
||||
* Fixes build break for plan9, nacl, solaris
|
||||
# 1.4.1
|
||||
This new release introduces:
|
||||
* Enhance TextFormatter to not print caller information when they are empty (#944)
|
||||
* Remove dependency on golang.org/x/crypto (#932, #943)
|
||||
|
||||
Fixes:
|
||||
* Fix Entry.WithContext method to return a copy of the initial entry (#941)
|
||||
|
||||
# 1.4.0
|
||||
This new release introduces:
|
||||
* Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
|
||||
* Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911)
|
||||
* Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
|
||||
|
||||
Fixes:
|
||||
* Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
|
||||
* Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
|
||||
* Fix infinite recursion on unknown `Level.String()` (#907)
|
||||
* Fix race condition in `getCaller` (#916).
|
||||
|
||||
|
||||
# 1.3.0
|
||||
This new release introduces:
|
||||
* Log, Logf, Logln functions for Logger and Entry that take a Level
|
||||
|
||||
Fixes:
|
||||
* Building prometheus node_exporter on AIX (#840)
|
||||
* Race condition in TextFormatter (#468)
|
||||
* Travis CI import path (#868)
|
||||
* Remove coloured output on Windows (#862)
|
||||
* Pointer to func as field in JSONFormatter (#870)
|
||||
* Properly marshal Levels (#873)
|
||||
|
||||
# 1.2.0
|
||||
This new release introduces:
|
||||
* A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
|
||||
|
|
|
@ -361,9 +361,11 @@ The built-in logging formatters are:
|
|||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
|
||||
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
|
|
|
@ -51,9 +51,9 @@ func Exit(code int) {
	os.Exit(code)
}

// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
// any Fatal log entry is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example usecase could be

@ -62,3 +62,15 @@ func Exit(code int) {
func RegisterExitHandler(handler func()) {
	handlers = append(handlers, handler)
}

// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
// any Fatal log entry is made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example usecase could be
// closing database connections, or sending a alert that the application is
// closing.
func DeferExitHandler(handler func()) {
	handlers = append([]func(){handler}, handlers...)
}
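DeferExitHandler differs from RegisterExitHandler only in ordering: prepended handlers run first, giving defer-like semantics. A small sketch:

package main

import (
	"log"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.RegisterExitHandler(func() { log.Println("appended: runs last") })
	logrus.DeferExitHandler(func() { log.Println("prepended: runs first") })

	// Fatal writes the entry, invokes every registered handler in order,
	// then calls os.Exit(1).
	logrus.Fatal("shutting down")
}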
|
|
@ -2,6 +2,7 @@ package logrus
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
|
@ -69,6 +70,9 @@ type Entry struct {
|
|||
// When formatter is called in entry.log(), a Buffer may be set to entry
|
||||
Buffer *bytes.Buffer
|
||||
|
||||
// Contains the context set by the user. Useful for hook processing etc.
|
||||
Context context.Context
|
||||
|
||||
// err may contain a field formatting error
|
||||
err string
|
||||
}
|
||||
|
@ -97,6 +101,11 @@ func (entry *Entry) WithError(err error) *Entry {
	return entry.WithField(ErrorKey, err)
}

// Add a context to the Entry.
func (entry *Entry) WithContext(ctx context.Context) *Entry {
	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx}
}

// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
	return entry.WithFields(Fields{key: value})
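Entry.Context makes the caller's context available to hooks and formatters. A hedged sketch of a hook that copies a request ID out of the context (the context key and field name are made up for illustration):

package main

import (
	"context"

	"github.com/sirupsen/logrus"
)

type ctxKey string

// requestIDHook copies a value from the entry's context into a log field.
type requestIDHook struct{}

func (requestIDHook) Levels() []logrus.Level { return logrus.AllLevels }

func (requestIDHook) Fire(e *logrus.Entry) error {
	if e.Context != nil {
		if id, ok := e.Context.Value(ctxKey("request_id")).(string); ok {
			e.Data["request_id"] = id
		}
	}
	return nil
}

func main() {
	logrus.AddHook(requestIDHook{})
	ctx := context.WithValue(context.Background(), ctxKey("request_id"), "abc-123")
	logrus.WithContext(ctx).Info("handling request")
}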
@ -108,23 +117,34 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
|
|||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
var field_err string
|
||||
fieldErr := entry.err
|
||||
for k, v := range fields {
|
||||
if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
|
||||
field_err = fmt.Sprintf("can not add field %q", k)
|
||||
if entry.err != "" {
|
||||
field_err = entry.err + ", " + field_err
|
||||
isErrField := false
|
||||
if t := reflect.TypeOf(v); t != nil {
|
||||
switch t.Kind() {
|
||||
case reflect.Func:
|
||||
isErrField = true
|
||||
case reflect.Ptr:
|
||||
isErrField = t.Elem().Kind() == reflect.Func
|
||||
}
|
||||
}
|
||||
if isErrField {
|
||||
tmp := fmt.Sprintf("can not add field %q", k)
|
||||
if fieldErr != "" {
|
||||
fieldErr = entry.err + ", " + tmp
|
||||
} else {
|
||||
fieldErr = tmp
|
||||
}
|
||||
} else {
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
|
||||
}
|
||||
|
||||
// Overrides the time of the Entry.
|
||||
func (entry *Entry) WithTime(t time.Time) *Entry {
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
|
||||
}
|
||||
|
||||
// getPackageName reduces a fully qualified function name to the package name
|
||||
|
@ -145,20 +165,23 @@ func getPackageName(f string) string {
|
|||
|
||||
// getCaller retrieves the name of the first non-logrus calling function
|
||||
func getCaller() *runtime.Frame {
|
||||
|
||||
// cache this package's fully-qualified name
|
||||
callerInitOnce.Do(func() {
|
||||
pcs := make([]uintptr, 2)
|
||||
_ = runtime.Callers(0, pcs)
|
||||
logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
|
||||
|
||||
// now that we have the cache, we can skip a minimum count of known-logrus functions
|
||||
// XXX this is dubious, the number of frames may vary
|
||||
minimumCallerDepth = knownLogrusFrames
|
||||
})
|
||||
|
||||
// Restrict the lookback frames to avoid runaway lookups
|
||||
pcs := make([]uintptr, maximumCallerDepth)
|
||||
depth := runtime.Callers(minimumCallerDepth, pcs)
|
||||
frames := runtime.CallersFrames(pcs[:depth])
|
||||
|
||||
// cache this package's fully-qualified name
|
||||
callerInitOnce.Do(func() {
|
||||
logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
|
||||
|
||||
// now that we have the cache, we can skip a minimum count of known-logrus functions
|
||||
// XXX this is dubious, the number of frames may vary store an entry in a logger interface
|
||||
minimumCallerDepth = knownLogrusFrames
|
||||
})
|
||||
|
||||
for f, again := frames.Next(); again; f, again = frames.Next() {
|
||||
pkg := getPackageName(f.Function)
|
||||
|
||||
|
@ -240,16 +263,18 @@ func (entry *Entry) write() {
|
|||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Trace(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(TraceLevel) {
|
||||
entry.log(TraceLevel, fmt.Sprint(args...))
|
||||
func (entry *Entry) Log(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.log(level, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Trace(args ...interface{}) {
|
||||
entry.Log(TraceLevel, args...)
|
||||
}
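The level-specific helpers now funnel through Entry.Log/Logf/Logln, which also means callers can pass a level value directly. A brief sketch:

package main

import "github.com/sirupsen/logrus"

func main() {
	entry := logrus.WithField("component", "frontend")
	entry.Log(logrus.WarnLevel, "disk space low")         // equivalent to entry.Warn(...)
	entry.Logf(logrus.DebugLevel, "retry %d of %d", 2, 5) // dropped unless DebugLevel is enabled
}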
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
|
@ -257,15 +282,11 @@ func (entry *Entry) Print(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
|
@ -273,43 +294,37 @@ func (entry *Entry) Warning(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(PanicLevel, args...)
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Tracef(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(TraceLevel) {
|
||||
entry.Trace(fmt.Sprintf(format, args...))
|
||||
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Tracef(format string, args ...interface{}) {
|
||||
entry.Logf(TraceLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(DebugLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(InfoLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
|
@ -317,9 +332,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(WarnLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
|
@ -327,42 +340,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(ErrorLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(FatalLevel, format, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(PanicLevel, format, args...)
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Traceln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(TraceLevel) {
|
||||
entry.Trace(entry.sprintlnn(args...))
|
||||
func (entry *Entry) Logln(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Traceln(args ...interface{}) {
|
||||
entry.Logln(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
|
@ -370,9 +377,7 @@ func (entry *Entry) Println(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
|
@ -380,22 +385,16 @@ func (entry *Entry) Warningln(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(PanicLevel, args...)
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
@ -55,6 +56,11 @@ func WithError(err error) *Entry {
|
|||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithContext creates an entry from the standard logger and adds a context to it.
|
||||
func WithContext(ctx context.Context) *Entry {
|
||||
return std.WithContext(ctx)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
|
|
|
@@ -6,6 +6,5 @@ require (
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/stretchr/objx v0.1.1 // indirect
    github.com/stretchr/testify v1.2.2
    golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
    golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
    golang.org/x/sys v0.0.0-20190422165155-953cdadca894
)
@@ -2,6 +2,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

@@ -9,7 +10,7 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -4,6 +4,7 @@ import (
    "bytes"
    "encoding/json"
    "fmt"
    "runtime"
)

type fieldKey string

@@ -42,6 +43,12 @@ type JSONFormatter struct {
    // }
    FieldMap FieldMap

    // CallerPrettyfier can be set by the user to modify the content
    // of the function and file keys in the json data when ReportCaller is
    // activated. If any of the returned value is the empty string the
    // corresponding key will be removed from json fields.
    CallerPrettyfier func(*runtime.Frame) (function string, file string)

    // PrettyPrint will indent all json logs
    PrettyPrint bool
}

@@ -82,8 +89,17 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
    data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
    data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
    if entry.HasCaller() {
        data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
        data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
        funcVal := entry.Caller.Function
        fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
        if f.CallerPrettyfier != nil {
            funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
        }
        if funcVal != "" {
            data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
        }
        if fileVal != "" {
            data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
        }
    }

    var b *bytes.Buffer

@@ -98,7 +114,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
        encoder.SetIndent("", " ")
    }
    if err := encoder.Encode(data); err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
        return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
    }

    return b.Bytes(), nil
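CallerPrettyfier, introduced in the JSONFormatter hunk above, lets the caller rewrite the function and file values emitted when caller reporting is on; an empty return value drops the corresponding key. A rough, self-contained sketch of how it might be wired up (the shortening logic is just an example, not part of this commit):

    package main

    import (
        "fmt"
        "path"
        "runtime"

        "github.com/sirupsen/logrus"
    )

    func main() {
        log := logrus.New()
        log.SetReportCaller(true)
        log.SetFormatter(&logrus.JSONFormatter{
            CallerPrettyfier: func(f *runtime.Frame) (string, string) {
                // keep only the bare function name and file:line instead of full paths
                return path.Base(f.Function), fmt.Sprintf("%s:%d", path.Base(f.File), f.Line)
            },
        })
        log.Info("caller fields are shortened by the prettyfier")
    }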
@@ -1,6 +1,7 @@
package logrus

import (
    "context"
    "io"
    "os"
    "sync"

@@ -124,6 +125,13 @@ func (logger *Logger) WithError(err error) *Entry {
    return entry.WithError(err)
}

// Add a context to the log entry.
func (logger *Logger) WithContext(ctx context.Context) *Entry {
    entry := logger.newEntry()
    defer logger.releaseEntry(entry)
    return entry.WithContext(ctx)
}

// Overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
    entry := logger.newEntry()

@@ -131,28 +139,24 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
    return entry.WithTime(t)
}

func (logger *Logger) Tracef(format string, args ...interface{}) {
    if logger.IsLevelEnabled(TraceLevel) {
func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
    if logger.IsLevelEnabled(level) {
        entry := logger.newEntry()
        entry.Tracef(format, args...)
        entry.Logf(level, format, args...)
        logger.releaseEntry(entry)
    }
}

func (logger *Logger) Tracef(format string, args ...interface{}) {
    logger.Logf(TraceLevel, format, args...)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
    if logger.IsLevelEnabled(DebugLevel) {
        entry := logger.newEntry()
        entry.Debugf(format, args...)
        logger.releaseEntry(entry)
    }
    logger.Logf(DebugLevel, format, args...)
}

func (logger *Logger) Infof(format string, args ...interface{}) {
    if logger.IsLevelEnabled(InfoLevel) {
        entry := logger.newEntry()
        entry.Infof(format, args...)
        logger.releaseEntry(entry)
    }
    logger.Logf(InfoLevel, format, args...)
}

func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -162,139 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
    if logger.IsLevelEnabled(WarnLevel) {
        entry := logger.newEntry()
        entry.Warnf(format, args...)
        logger.releaseEntry(entry)
    }
    logger.Logf(WarnLevel, format, args...)
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
    if logger.IsLevelEnabled(WarnLevel) {
        entry := logger.newEntry()
        entry.Warnf(format, args...)
        logger.releaseEntry(entry)
    }
    logger.Warnf(format, args...)
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
    if logger.IsLevelEnabled(ErrorLevel) {
        entry := logger.newEntry()
        entry.Errorf(format, args...)
        logger.releaseEntry(entry)
    }
    logger.Logf(ErrorLevel, format, args...)
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
    if logger.IsLevelEnabled(FatalLevel) {
        entry := logger.newEntry()
        entry.Fatalf(format, args...)
        logger.releaseEntry(entry)
    }
    logger.Logf(FatalLevel, format, args...)
    logger.Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
    if logger.IsLevelEnabled(PanicLevel) {
    logger.Logf(PanicLevel, format, args...)
}

func (logger *Logger) Log(level Level, args ...interface{}) {
    if logger.IsLevelEnabled(level) {
        entry := logger.newEntry()
        entry.Panicf(format, args...)
        entry.Log(level, args...)
        logger.releaseEntry(entry)
    }
}

func (logger *Logger) Trace(args ...interface{}) {
    if logger.IsLevelEnabled(TraceLevel) {
        entry := logger.newEntry()
        entry.Trace(args...)
        logger.releaseEntry(entry)
    }
    logger.Log(TraceLevel, args...)
}

func (logger *Logger) Debug(args ...interface{}) {
    if logger.IsLevelEnabled(DebugLevel) {
        entry := logger.newEntry()
        entry.Debug(args...)
        logger.releaseEntry(entry)
    }
    logger.Log(DebugLevel, args...)
}

func (logger *Logger) Info(args ...interface{}) {
    if logger.IsLevelEnabled(InfoLevel) {
        entry := logger.newEntry()
        entry.Info(args...)
        logger.releaseEntry(entry)
    }
    logger.Log(InfoLevel, args...)
}

func (logger *Logger) Print(args ...interface{}) {
    entry := logger.newEntry()
    entry.Info(args...)
    entry.Print(args...)
    logger.releaseEntry(entry)
}

func (logger *Logger) Warn(args ...interface{}) {
    if logger.IsLevelEnabled(WarnLevel) {
        entry := logger.newEntry()
        entry.Warn(args...)
        logger.releaseEntry(entry)
    }
    logger.Log(WarnLevel, args...)
}

func (logger *Logger) Warning(args ...interface{}) {
    if logger.IsLevelEnabled(WarnLevel) {
        entry := logger.newEntry()
        entry.Warn(args...)
        logger.releaseEntry(entry)
    }
    logger.Warn(args...)
}

func (logger *Logger) Error(args ...interface{}) {
    if logger.IsLevelEnabled(ErrorLevel) {
        entry := logger.newEntry()
        entry.Error(args...)
        logger.releaseEntry(entry)
    }
    logger.Log(ErrorLevel, args...)
}

func (logger *Logger) Fatal(args ...interface{}) {
    if logger.IsLevelEnabled(FatalLevel) {
        entry := logger.newEntry()
        entry.Fatal(args...)
        logger.releaseEntry(entry)
    }
    logger.Log(FatalLevel, args...)
    logger.Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
    if logger.IsLevelEnabled(PanicLevel) {
    logger.Log(PanicLevel, args...)
}

func (logger *Logger) Logln(level Level, args ...interface{}) {
    if logger.IsLevelEnabled(level) {
        entry := logger.newEntry()
        entry.Panic(args...)
        entry.Logln(level, args...)
        logger.releaseEntry(entry)
    }
}

func (logger *Logger) Traceln(args ...interface{}) {
    if logger.IsLevelEnabled(TraceLevel) {
        entry := logger.newEntry()
        entry.Traceln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(TraceLevel, args...)
}

func (logger *Logger) Debugln(args ...interface{}) {
    if logger.IsLevelEnabled(DebugLevel) {
        entry := logger.newEntry()
        entry.Debugln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(DebugLevel, args...)
}

func (logger *Logger) Infoln(args ...interface{}) {
    if logger.IsLevelEnabled(InfoLevel) {
        entry := logger.newEntry()
        entry.Infoln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(InfoLevel, args...)
}

func (logger *Logger) Println(args ...interface{}) {

@@ -304,44 +260,24 @@ func (logger *Logger) Println(args ...interface{}) {
}

func (logger *Logger) Warnln(args ...interface{}) {
    if logger.IsLevelEnabled(WarnLevel) {
        entry := logger.newEntry()
        entry.Warnln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(WarnLevel, args...)
}

func (logger *Logger) Warningln(args ...interface{}) {
    if logger.IsLevelEnabled(WarnLevel) {
        entry := logger.newEntry()
        entry.Warnln(args...)
        logger.releaseEntry(entry)
    }
    logger.Warnln(args...)
}

func (logger *Logger) Errorln(args ...interface{}) {
    if logger.IsLevelEnabled(ErrorLevel) {
        entry := logger.newEntry()
        entry.Errorln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(ErrorLevel, args...)
}

func (logger *Logger) Fatalln(args ...interface{}) {
    if logger.IsLevelEnabled(FatalLevel) {
        entry := logger.newEntry()
        entry.Fatalln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(FatalLevel, args...)
    logger.Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
    if logger.IsLevelEnabled(PanicLevel) {
        entry := logger.newEntry()
        entry.Panicln(args...)
        logger.releaseEntry(entry)
    }
    logger.Logln(PanicLevel, args...)
}

func (logger *Logger) Exit(code int) {
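The logger.go rewrite above follows one pattern throughout: the per-level methods (Tracef, Debug, Warnln, and so on) no longer each check IsLevelEnabled and manage a pooled entry themselves; they delegate to the new generic Logf, Log, and Logln, which perform the level check once and only then borrow an entry from the pool. Caller-visible behaviour is unchanged, as in this small sketch:

    log := logrus.New()
    log.SetLevel(logrus.WarnLevel)
    log.Debugf("ignored: %d", 1) // debug is below the configured warn level, so Logf returns without borrowing an entry
    log.Warnf("logged: %d", 2)   // forwarded through Logf(WarnLevel, ...)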
@@ -14,24 +14,11 @@ type Level uint32

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
    switch level {
    case TraceLevel:
        return "trace"
    case DebugLevel:
        return "debug"
    case InfoLevel:
        return "info"
    case WarnLevel:
        return "warning"
    case ErrorLevel:
        return "error"
    case FatalLevel:
        return "fatal"
    case PanicLevel:
        return "panic"
    if b, err := level.MarshalText(); err == nil {
        return string(b)
    } else {
        return "unknown"
    }

    return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.

@@ -69,6 +56,27 @@ func (level *Level) UnmarshalText(text []byte) error {
    return nil
}

func (level Level) MarshalText() ([]byte, error) {
    switch level {
    case TraceLevel:
        return []byte("trace"), nil
    case DebugLevel:
        return []byte("debug"), nil
    case InfoLevel:
        return []byte("info"), nil
    case WarnLevel:
        return []byte("warning"), nil
    case ErrorLevel:
        return []byte("error"), nil
    case FatalLevel:
        return []byte("fatal"), nil
    case PanicLevel:
        return []byte("panic"), nil
    }

    return nil, fmt.Errorf("not a valid logrus level %d", level)
}

// A constant exposing all logging levels
var AllLevels = []Level{
    PanicLevel,
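With this change Level.String is defined in terms of the new MarshalText (falling back to "unknown" for out-of-range values), and a Level now satisfies encoding.TextMarshaler alongside the existing TextUnmarshaler, so it round-trips cleanly through text-based encoders. A short sketch of the expected behaviour:

    b, _ := logrus.InfoLevel.MarshalText()
    fmt.Println(string(b))                 // prints "info"
    fmt.Println(logrus.WarnLevel.String()) // prints "warning"

    var lvl logrus.Level
    _ = lvl.UnmarshalText([]byte("debug")) // lvl is now logrus.DebugLevel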
@@ -0,0 +1,12 @@
// +build darwin dragonfly freebsd netbsd openbsd

package logrus

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TIOCGETA

func isTerminal(fd int) bool {
    _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
    return err == nil
}
@@ -1,4 +1,4 @@
// +build js
// +build js nacl plan9

package logrus
@@ -1,18 +1,16 @@
// +build !appengine,!js,!windows
// +build !appengine,!js,!windows,!nacl,!plan9

package logrus

import (
    "io"
    "os"

    "golang.org/x/crypto/ssh/terminal"
)

func checkIfTerminal(w io.Writer) bool {
    switch v := w.(type) {
    case *os.File:
        return terminal.IsTerminal(int(v.Fd()))
        return isTerminal(int(v.Fd()))
    default:
        return false
    }
@@ -0,0 +1,11 @@
package logrus

import (
    "golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func isTerminal(fd int) bool {
    _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
    return err == nil
}
@@ -0,0 +1,12 @@
// +build linux aix

package logrus

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS

func isTerminal(fd int) bool {
    _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
    return err == nil
}
@@ -6,15 +6,29 @@ import (
    "io"
    "os"
    "syscall"

    sequences "github.com/konsorten/go-windows-terminal-sequences"
)

func initTerminal(w io.Writer) {
    switch v := w.(type) {
    case *os.File:
        sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
    }
}

func checkIfTerminal(w io.Writer) bool {
    var ret bool
    switch v := w.(type) {
    case *os.File:
        var mode uint32
        err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
        return err == nil
        ret = (err == nil)
    default:
        return false
        ret = false
    }
    if ret {
        initTerminal(w)
    }
    return ret
}
@@ -1,8 +0,0 @@
// +build !windows

package logrus

import "io"

func initTerminal(w io.Writer) {
}