fixes for mintnet types
This commit is contained in:
parent
8b735b36be
commit
b6090ad183
|
@ -19,12 +19,7 @@
|
||||||
"Rev": "234959944d9cf05229b02e8b386e5cffe1e4e04a"
|
"Rev": "234959944d9cf05229b02e8b386e5cffe1e4e04a"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/inconshreveable/log15/stack",
|
"ImportPath": "github.com/inconshreveable/log15",
|
||||||
"Comment": "v2.3-48-g210d6fd",
|
|
||||||
"Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/inconshreveable/log15/term",
|
|
||||||
"Comment": "v2.3-48-g210d6fd",
|
"Comment": "v2.3-48-g210d6fd",
|
||||||
"Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
|
"Rev": "210d6fdc4d979ef6579778f1b6ed84571454abb4"
|
||||||
},
|
},
|
||||||
|
@ -44,6 +39,16 @@
|
||||||
"ImportPath": "github.com/naoina/toml",
|
"ImportPath": "github.com/naoina/toml",
|
||||||
"Rev": "751171607256bb66e64c9f0220c00662420c38e9"
|
"Rev": "751171607256bb66e64c9f0220c00662420c38e9"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/onsi/ginkgo",
|
||||||
|
"Comment": "v1.2.0-42-g07d85e6",
|
||||||
|
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/onsi/gomega",
|
||||||
|
"Comment": "v1.0-81-gad93e46",
|
||||||
|
"Rev": "ad93e463829d54602c66e94813bc1eb9b10d454c"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/rcrowley/go-metrics",
|
"ImportPath": "github.com/rcrowley/go-metrics",
|
||||||
"Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
|
"Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
|
||||||
|
@ -52,6 +57,10 @@
|
||||||
"ImportPath": "github.com/sfreiberg/gotwilio",
|
"ImportPath": "github.com/sfreiberg/gotwilio",
|
||||||
"Rev": "f024bbefe80fdb7bcc8c43b6add05dae97744e0e"
|
"Rev": "f024bbefe80fdb7bcc8c43b6add05dae97744e0e"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/stathat/go",
|
||||||
|
"Rev": "cf69b0bcb80478755dc0ea1120b36000e35dcbbb"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||||
"Rev": "b7c1cafa822344831a63ad9c8fafb1556e66d33d"
|
"Rev": "b7c1cafa822344831a63ad9c8fafb1556e66d33d"
|
||||||
|
@ -132,18 +141,18 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/tendermint/tendermint/config/tendermint",
|
"ImportPath": "github.com/tendermint/tendermint/config/tendermint",
|
||||||
"Comment": "0.2-114-gc0024cc",
|
"Comment": "0.2-116-g85f6db2",
|
||||||
"Rev": "c0024cc7b209e022630e47782e787bec3640af30"
|
"Rev": "85f6db2435393a7426c6be2cbfb658f98932b52d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/tendermint/tendermint/rpc/core/types",
|
"ImportPath": "github.com/tendermint/tendermint/rpc/core/types",
|
||||||
"Comment": "0.2-114-gc0024cc",
|
"Comment": "0.2-116-g85f6db2",
|
||||||
"Rev": "c0024cc7b209e022630e47782e787bec3640af30"
|
"Rev": "85f6db2435393a7426c6be2cbfb658f98932b52d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/tendermint/tendermint/types",
|
"ImportPath": "github.com/tendermint/tendermint/types",
|
||||||
"Comment": "0.2-114-gc0024cc",
|
"Comment": "0.2-116-g85f6db2",
|
||||||
"Rev": "c0024cc7b209e022630e47782e787bec3640af30"
|
"Rev": "85f6db2435393a7426c6be2cbfb658f98932b52d"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/crypto/curve25519",
|
"ImportPath": "golang.org/x/crypto/curve25519",
|
||||||
|
|
|
@ -34,7 +34,7 @@ The chain's rpc can be found at http://localhost:46657.
|
||||||
|
|
||||||
# Notes
|
# Notes
|
||||||
|
|
||||||
The netmon expects a config file with a list of chains/validators to get started. A default one for a local chain is provided as local-chain.json. `netmon config` can be used to create a config file for a chain deployed with `mintnet`.
|
The netmon expects a config file with a list of chains/validators to get started. A default one for a local chain is provided as local-chain.json. `netmon config` can be used to create a config file for a chain deployed with `mintnet`. Configs are also generated by mintnet.
|
||||||
|
|
||||||
The API is available as GET requests with URI encoded parameters, or as JSONRPC POST requests. The JSONRPC methods are also exposed over websocket.
|
The API is available as GET requests with URI encoded parameters, or as JSONRPC POST requests. The JSONRPC methods are also exposed over websocket.
|
||||||
|
|
||||||
|
|
34
main.go
34
main.go
|
@ -98,10 +98,11 @@ func cmdAddChain(c *cli.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// load new chain
|
// load new chain
|
||||||
chainCfg := new(types.BlockchainConfig)
|
chainCfg_ := new(BlockchainConfig)
|
||||||
if err := ReadJSONFile(chainCfg, path.Join(chainDir, "chain_config.json")); err != nil {
|
if err := ReadJSONFile(chainCfg_, path.Join(chainDir, "chain_config.json")); err != nil {
|
||||||
Exit(err.Error())
|
Exit(err.Error())
|
||||||
}
|
}
|
||||||
|
chainCfg := convertMintnetBlockchain(chainCfg_)
|
||||||
|
|
||||||
// append new chain
|
// append new chain
|
||||||
chainsAndVals.Blockchains = append(chainsAndVals.Blockchains, chainCfg)
|
chainsAndVals.Blockchains = append(chainsAndVals.Blockchains, chainCfg)
|
||||||
|
@ -287,13 +288,32 @@ func LoadChainsAndValsFromFile(configFile string) (*ChainsAndValidators, error)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// for now we start with one blockchain loaded from file;
|
chainsAndVals_ := new(ChainsAndValidators)
|
||||||
// eventually more can be uploaded or created through endpoints
|
wire.ReadJSON(chainsAndVals_, b, &err)
|
||||||
chainsAndVals := new(ChainsAndValidators)
|
|
||||||
wire.ReadJSON(chainsAndVals, b, &err)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return chainsAndVals, nil
|
return chainsAndVals_, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// because types are duplicated in mintnet
|
||||||
|
type BlockchainConfig struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
ValSetID string `json:"val_set_id"`
|
||||||
|
Validators []*types.ValidatorConfig `json:"validators"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertMintnetBlockchain(b *BlockchainConfig) *types.BlockchainConfig {
|
||||||
|
vals := make([]*types.ValidatorState, len(b.Validators))
|
||||||
|
for j, v := range b.Validators {
|
||||||
|
vals[j] = new(types.ValidatorState)
|
||||||
|
vals[j].Config = v
|
||||||
|
}
|
||||||
|
return &types.BlockchainConfig{
|
||||||
|
ID: b.ID,
|
||||||
|
ValSetID: b.ValSetID,
|
||||||
|
Validators: vals,
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
16
setup.sh
16
setup.sh
|
@ -4,7 +4,9 @@ set -e
|
||||||
# assumes machines already created
|
# assumes machines already created
|
||||||
N_MACHINES=4
|
N_MACHINES=4
|
||||||
|
|
||||||
TESTNET_DIR=~/testnets
|
MACH_PREFIX=mach
|
||||||
|
|
||||||
|
TESTNET_DIR=~/testnets_mach
|
||||||
CHAINS_AND_VALS=$TESTNET_DIR/chains_and_vals.json
|
CHAINS_AND_VALS=$TESTNET_DIR/chains_and_vals.json
|
||||||
CHAINS_DIR=$TESTNET_DIR/chains
|
CHAINS_DIR=$TESTNET_DIR/chains
|
||||||
VALS_DIR=$TESTNET_DIR/validators
|
VALS_DIR=$TESTNET_DIR/validators
|
||||||
|
@ -12,7 +14,7 @@ VALS_DIR=$TESTNET_DIR/validators
|
||||||
VALSETS=(validator-set-numero-uno BOA BunkBankBandaloo victory_validators)
|
VALSETS=(validator-set-numero-uno BOA BunkBankBandaloo victory_validators)
|
||||||
#VALSETS=(my-val-set)
|
#VALSETS=(my-val-set)
|
||||||
|
|
||||||
CHAINS=(blockchain1 chainiac Chainelle chain-a-daisy blockchain100 bandit-chain gambit-chain gambit-chain-duo gambit-c hain-1002)
|
CHAINS=(blockchain1 chainiac Chainelle chain-a-daisy blockchain100 bandit-chain gambit-chain gambit-chain-duo gambit-chain-1002)
|
||||||
#CHAINS=(my-chain)
|
#CHAINS=(my-chain)
|
||||||
|
|
||||||
mkdir -p $TESTNET_DIR
|
mkdir -p $TESTNET_DIR
|
||||||
|
@ -25,19 +27,23 @@ for valset in ${VALSETS[@]}; do
|
||||||
netmon chains-and-vals val $CHAINS_AND_VALS $VALS_DIR/$valset
|
netmon chains-and-vals val $CHAINS_AND_VALS $VALS_DIR/$valset
|
||||||
done
|
done
|
||||||
|
|
||||||
|
|
||||||
echo "Make some blockchains"
|
echo "Make some blockchains"
|
||||||
# make some blockchains with each validator set
|
# make some blockchains with each validator set
|
||||||
for i in ${!CHAINS[@]}; do
|
for i in ${!CHAINS[@]}; do
|
||||||
valset=$(($i % ${#VALSETS[@]}))
|
valset=$(($i % ${#VALSETS[@]}))
|
||||||
echo $valset
|
mintnet init --machines "${MACH_PREFIX}[1-4]" chain --validator-set $VALS_DIR/${VALSETS[$valset]} $CHAINS_DIR/${CHAINS[$i]}
|
||||||
mintnet init chain --validator-set $VALS_DIR/${VALSETS[$valset]} $CHAINS_DIR/${CHAINS[$i]}
|
|
||||||
done
|
done
|
||||||
|
|
||||||
|
|
||||||
echo "Start the chains"
|
echo "Start the chains"
|
||||||
for chain in ${CHAINS[@]}; do
|
for chain in ${CHAINS[@]}; do
|
||||||
|
# randomize the machine order for each chain
|
||||||
machs=`python -c "import random; x=range(1, $(($N_MACHINES+1))); random.shuffle(x); print \",\".join(map(str,x))"`
|
machs=`python -c "import random; x=range(1, $(($N_MACHINES+1))); random.shuffle(x); print \",\".join(map(str,x))"`
|
||||||
echo $machs
|
echo $machs
|
||||||
echo $chain
|
echo $chain
|
||||||
mintnet start --publish-all --machines mach[$machs] app-$chain $CHAINS_DIR/$chain
|
mintnet start --publish-all --machines ${MACH_PREFIX}[$machs] app-$chain $CHAINS_DIR/$chain
|
||||||
|
|
||||||
|
# add the new chain config
|
||||||
netmon chains-and-vals chain $CHAINS_AND_VALS $CHAINS_DIR/$chain
|
netmon chains-and-vals chain $CHAINS_AND_VALS $CHAINS_DIR/$chain
|
||||||
done
|
done
|
||||||
|
|
|
@ -15,6 +15,7 @@ const newBlockTimeoutSeconds = 5
|
||||||
|
|
||||||
//------------------------------------------------
|
//------------------------------------------------
|
||||||
// blockchain types
|
// blockchain types
|
||||||
|
// NOTE: mintnet duplicates some types from here and val.go
|
||||||
//------------------------------------------------
|
//------------------------------------------------
|
||||||
|
|
||||||
// Known chain and validator set IDs (from which anything else can be found)
|
// Known chain and validator set IDs (from which anything else can be found)
|
||||||
|
|
27
types/val.go
27
types/val.go
|
@ -1,15 +1,12 @@
|
||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/tendermint/go-crypto"
|
"github.com/tendermint/go-crypto"
|
||||||
"github.com/tendermint/go-event-meter"
|
"github.com/tendermint/go-event-meter"
|
||||||
"github.com/tendermint/go-events"
|
|
||||||
client "github.com/tendermint/go-rpc/client"
|
client "github.com/tendermint/go-rpc/client"
|
||||||
"github.com/tendermint/go-wire"
|
|
||||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||||
tmtypes "github.com/tendermint/tendermint/types"
|
tmtypes "github.com/tendermint/tendermint/types"
|
||||||
)
|
)
|
||||||
|
@ -68,8 +65,8 @@ func (vs *ValidatorState) Start() error {
|
||||||
rpcAddr := vs.Config.RPCAddr
|
rpcAddr := vs.Config.RPCAddr
|
||||||
vs.Config.mtx.Unlock()
|
vs.Config.mtx.Unlock()
|
||||||
|
|
||||||
em := eventmeter.NewEventMeter(fmt.Sprintf("ws://%s/websocket", rpcAddr), UnmarshalEvent)
|
em := eventmeter.NewEventMeter(fmt.Sprintf("ws://%s/websocket", rpcAddr), ctypes.UnmarshalEvent)
|
||||||
if _, err := em.Start(); err != nil {
|
if err := em.Start(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
vs.em = em
|
vs.em = em
|
||||||
|
@ -146,23 +143,3 @@ type ValidatorStatus struct {
|
||||||
Latency float64 `json:"latency" wire:"unsafe"`
|
Latency float64 `json:"latency" wire:"unsafe"`
|
||||||
BlockHeight int `json:"block_height"`
|
BlockHeight int `json:"block_height"`
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------------
|
|
||||||
// utilities
|
|
||||||
|
|
||||||
// Unmarshal a json event
|
|
||||||
func UnmarshalEvent(b json.RawMessage) (string, events.EventData, error) {
|
|
||||||
var err error
|
|
||||||
result := new(ctypes.TMResult)
|
|
||||||
wire.ReadJSONPtr(result, b, &err)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
event, ok := (*result).(*ctypes.ResultEvent)
|
|
||||||
if !ok {
|
|
||||||
return "", nil, nil // TODO: handle non-event messages (ie. return from subscribe/unsubscribe)
|
|
||||||
// fmt.Errorf("Result is not type *ctypes.ResultEvent. Got %v", reflect.TypeOf(*result))
|
|
||||||
}
|
|
||||||
return event.Name, event.Data, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.0
|
||||||
|
- 1.1
|
||||||
|
- 1.2
|
||||||
|
- 1.3
|
||||||
|
- release
|
||||||
|
- tip
|
|
@ -0,0 +1,11 @@
|
||||||
|
Contributors to log15:
|
||||||
|
|
||||||
|
- Aaron L
|
||||||
|
- Alan Shreve
|
||||||
|
- Chris Hines
|
||||||
|
- Ciaran Downey
|
||||||
|
- Dmitry Chestnykh
|
||||||
|
- Evan Shaw
|
||||||
|
- Péter Szilágyi
|
||||||
|
- Trevor Gattis
|
||||||
|
- Vincent Vanackere
|
|
@ -0,0 +1,60 @@
|
||||||
|
![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png)
|
||||||
|
|
||||||
|
# log15 [![godoc reference](https://godoc.org/gopkg.in/inconshreveable/log15.v2?status.png)](https://godoc.org/gopkg.in/inconshreveable/log15.v2)
|
||||||
|
|
||||||
|
Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
- A simple, easy-to-understand API
|
||||||
|
- Promotes structured logging by encouraging use of key/value pairs
|
||||||
|
- Child loggers which inherit and add their own private context
|
||||||
|
- Lazy evaluation of expensive operations
|
||||||
|
- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
|
||||||
|
- Color terminal support
|
||||||
|
- Built-in support for logging to files, streams, syslog, and the network
|
||||||
|
- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
|
||||||
|
|
||||||
|
## Versioning
|
||||||
|
The API of the master branch of log15 should always be considered unstable. Using a stable version
|
||||||
|
of the log15 package is supported by gopkg.in. Include your dependency like so:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import log "gopkg.in/inconshreveable/log15.v2"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
```go
|
||||||
|
// all loggers can have key/value context
|
||||||
|
srvlog := log.New("module", "app/server")
|
||||||
|
|
||||||
|
// all log messages can have key/value context
|
||||||
|
srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
|
||||||
|
|
||||||
|
// child loggers with inherited context
|
||||||
|
connlog := srvlog.New("raddr", c.RemoteAddr())
|
||||||
|
connlog.Info("connection open")
|
||||||
|
|
||||||
|
// lazy evaluation
|
||||||
|
connlog.Debug("ping remote", "latency", log.Lazy(pingRemote))
|
||||||
|
|
||||||
|
// flexible configuration
|
||||||
|
srvlog.SetHandler(log.MultiHandler(
|
||||||
|
log.StreamHandler(os.Stderr, log.LogfmtFormat()),
|
||||||
|
log.LvlFilterHandler(
|
||||||
|
log.LvlError,
|
||||||
|
log.Must.FileHandler("errors.json", log.JsonFormat())))
|
||||||
|
```
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
|
### The varargs style is brittle and error prone! Can I have type safety please?
|
||||||
|
Yes. Use `log.Ctx`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
srvlog := log.New(log.Ctx{"module": "app/server"})
|
||||||
|
srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
Apache
|
|
@ -0,0 +1,19 @@
|
||||||
|
# log15's release strategy
|
||||||
|
|
||||||
|
log15 uses gopkg.in to manage versioning releases so that consumers who don't vendor dependencies can rely upon a stable API.
|
||||||
|
|
||||||
|
## Master
|
||||||
|
|
||||||
|
Master is considered to have no API stability guarantee, so merging new code that passes tests into master is always okay.
|
||||||
|
|
||||||
|
## Releasing a new API-compatible version
|
||||||
|
|
||||||
|
The process to release a new API-compatible version is described below. For the purposes of this example, we'll assume you're trying to release a new version of v2
|
||||||
|
|
||||||
|
1. `git checkout v2`
|
||||||
|
1. `git merge master`
|
||||||
|
1. Audit the code for any imports of sub-packages. Modify any import references from `github.com/inconshrevealbe/log15/<pkg>` -> `gopkg.in/inconshreveable/log15.v2/<pkg>`
|
||||||
|
1. `git commit`
|
||||||
|
1. `git tag`, find the latest tag of the style v2.X.
|
||||||
|
1. `git tag v2.X+1` If the last version was v2.6, you would run `git tag v2.7`
|
||||||
|
1. `git push --tags git@github.com:inconshreveable/log15.git v2`
|
|
@ -0,0 +1,333 @@
|
||||||
|
/*
|
||||||
|
Package log15 provides an opinionated, simple toolkit for best-practice logging that is
|
||||||
|
both human and machine readable. It is modeled after the standard library's io and net/http
|
||||||
|
packages.
|
||||||
|
|
||||||
|
This package enforces you to only log key/value pairs. Keys must be strings. Values may be
|
||||||
|
any type that you like. The default output format is logfmt, but you may also choose to use
|
||||||
|
JSON instead if that suits you. Here's how you log:
|
||||||
|
|
||||||
|
log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
|
||||||
|
|
||||||
|
This will output a line that looks like:
|
||||||
|
|
||||||
|
lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
|
||||||
|
|
||||||
|
Getting Started
|
||||||
|
|
||||||
|
To get started, you'll want to import the library:
|
||||||
|
|
||||||
|
import log "gopkg.in/inconshreveable/log15.v2"
|
||||||
|
|
||||||
|
|
||||||
|
Now you're ready to start logging:
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.Info("Program starting", "args", os.Args())
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Convention
|
||||||
|
|
||||||
|
Because recording a human-meaningful message is common and good practice, the first argument to every
|
||||||
|
logging method is the value to the *implicit* key 'msg'.
|
||||||
|
|
||||||
|
Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
|
||||||
|
will the current timestamp with key 't'.
|
||||||
|
|
||||||
|
You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
|
||||||
|
you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
|
||||||
|
logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
|
||||||
|
in the variadic argument list:
|
||||||
|
|
||||||
|
log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
|
||||||
|
|
||||||
|
If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
|
||||||
|
|
||||||
|
log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
|
||||||
|
|
||||||
|
|
||||||
|
Context loggers
|
||||||
|
|
||||||
|
Frequently, you want to add context to a logger so that you can track actions associated with it. An http
|
||||||
|
request is a good example. You can easily create new loggers that have context that is automatically included
|
||||||
|
with each log line:
|
||||||
|
|
||||||
|
requestlogger := log.New("path", r.URL.Path)
|
||||||
|
|
||||||
|
// later
|
||||||
|
requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
|
||||||
|
|
||||||
|
This will output a log line that includes the path context that is attached to the logger:
|
||||||
|
|
||||||
|
lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
|
||||||
|
|
||||||
|
|
||||||
|
Handlers
|
||||||
|
|
||||||
|
The Handler interface defines where log lines are printed to and how they are formated. Handler is a
|
||||||
|
single interface that is inspired by net/http's handler interface:
|
||||||
|
|
||||||
|
type Handler interface {
|
||||||
|
Log(r *Record)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Handlers can filter records, format them, or dispatch to multiple other Handlers.
|
||||||
|
This package implements a number of Handlers for common logging patterns that are
|
||||||
|
easily composed to create flexible, custom logging structures.
|
||||||
|
|
||||||
|
Here's an example handler that prints logfmt output to Stdout:
|
||||||
|
|
||||||
|
handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
|
||||||
|
|
||||||
|
Here's an example handler that defers to two other handlers. One handler only prints records
|
||||||
|
from the rpc package in logfmt to standard out. The other prints records at Error level
|
||||||
|
or above in JSON formatted output to the file /var/log/service.json
|
||||||
|
|
||||||
|
handler := log.MultiHandler(
|
||||||
|
log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
|
||||||
|
log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler())
|
||||||
|
)
|
||||||
|
|
||||||
|
Logging File Names and Line Numbers
|
||||||
|
|
||||||
|
This package implements three Handlers that add debugging information to the
|
||||||
|
context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
|
||||||
|
an example that adds the source file and line number of each logging call to
|
||||||
|
the context.
|
||||||
|
|
||||||
|
h := log.CallerFileHandler(log.StdoutHandler())
|
||||||
|
log.Root().SetHandler(h)
|
||||||
|
...
|
||||||
|
log.Error("open file", "err", err)
|
||||||
|
|
||||||
|
This will output a line that looks like:
|
||||||
|
|
||||||
|
lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
|
||||||
|
|
||||||
|
Here's an example that logs the call stack rather than just the call site.
|
||||||
|
|
||||||
|
h := log.CallerStackHandler("%+v", log.StdoutHandler())
|
||||||
|
log.Root().SetHandler(h)
|
||||||
|
...
|
||||||
|
log.Error("open file", "err", err)
|
||||||
|
|
||||||
|
This will output a line that looks like:
|
||||||
|
|
||||||
|
lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
|
||||||
|
|
||||||
|
The "%+v" format instructs the handler to include the path of the source file
|
||||||
|
relative to the compile time GOPATH. The log15/stack package documents the
|
||||||
|
full list of formatting verbs and modifiers available.
|
||||||
|
|
||||||
|
Custom Handlers
|
||||||
|
|
||||||
|
The Handler interface is so simple that it's also trivial to write your own. Let's create an
|
||||||
|
example handler which tries to write to one handler, but if that fails it falls back to
|
||||||
|
writing to another handler and includes the error that it encountered when trying to write
|
||||||
|
to the primary. This might be useful when trying to log over a network socket, but if that
|
||||||
|
fails you want to log those records to a file on disk.
|
||||||
|
|
||||||
|
type BackupHandler struct {
|
||||||
|
Primary Handler
|
||||||
|
Secondary Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *BackupHandler) Log (r *Record) error {
|
||||||
|
err := h.Primary.Log(r)
|
||||||
|
if err != nil {
|
||||||
|
r.Ctx = append(ctx, "primary_err", err)
|
||||||
|
return h.Secondary.Log(r)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
This pattern is so useful that a generic version that handles an arbitrary number of Handlers
|
||||||
|
is included as part of this library called FailoverHandler.
|
||||||
|
|
||||||
|
Logging Expensive Operations
|
||||||
|
|
||||||
|
Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
|
||||||
|
the price of computing them if you haven't turned up your logging level to a high level of detail.
|
||||||
|
|
||||||
|
This package provides a simple type to annotate a logging operation that you want to be evaluated
|
||||||
|
lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
|
||||||
|
filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
|
||||||
|
|
||||||
|
func factorRSAKey() (factors []int) {
|
||||||
|
// return the factors of a very large number
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("factors", log.Lazy{factorRSAKey})
|
||||||
|
|
||||||
|
If this message is not logged for any reason (like logging at the Error level), then
|
||||||
|
factorRSAKey is never evaluated.
|
||||||
|
|
||||||
|
Dynamic context values
|
||||||
|
|
||||||
|
The same log.Lazy mechanism can be used to attach context to a logger which you want to be
|
||||||
|
evaluated when the message is logged, but not when the logger is created. For example, let's imagine
|
||||||
|
a game where you have Player objects:
|
||||||
|
|
||||||
|
type Player struct {
|
||||||
|
name string
|
||||||
|
alive bool
|
||||||
|
log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
You always want to log a player's name and whether they're alive or dead, so when you create the player
|
||||||
|
object, you might do:
|
||||||
|
|
||||||
|
p := &Player{name: name, alive: true}
|
||||||
|
p.Logger = log.New("name", p.name, "alive", p.alive)
|
||||||
|
|
||||||
|
Only now, even after a player has died, the logger will still report they are alive because the logging
|
||||||
|
context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
|
||||||
|
of whether the player is alive or not to each log message, so that the log records will reflect the player's
|
||||||
|
current state no matter when the log message is written:
|
||||||
|
|
||||||
|
p := &Player{name: name, alive: true}
|
||||||
|
isAlive := func() bool { return p.alive }
|
||||||
|
player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
|
||||||
|
|
||||||
|
Terminal Format
|
||||||
|
|
||||||
|
If log15 detects that stdout is a terminal, it will configure the default
|
||||||
|
handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
|
||||||
|
logs records nicely for your terminal, including color-coded output based
|
||||||
|
on log level.
|
||||||
|
|
||||||
|
Error Handling
|
||||||
|
|
||||||
|
Becasuse log15 allows you to step around the type system, there are a few ways you can specify
|
||||||
|
invalid arguments to the logging functions. You could, for example, wrap something that is not
|
||||||
|
a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
|
||||||
|
are typically the mechanism by which errors are reported, it would be onerous for the logging functions
|
||||||
|
to return errors. Instead, log15 handles errors by making these guarantees to you:
|
||||||
|
|
||||||
|
- Any log record containing an error will still be printed with the error explained to you as part of the log record.
|
||||||
|
|
||||||
|
- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
|
||||||
|
(and if you like, automatically) detect if any of your logging calls are passing bad values.
|
||||||
|
|
||||||
|
Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
|
||||||
|
are encouraged to return errors only if they fail to write their log records out to an external source like if the
|
||||||
|
syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
|
||||||
|
like the FailoverHandler.
|
||||||
|
|
||||||
|
Library Use
|
||||||
|
|
||||||
|
log15 is intended to be useful for library authors as a way to provide configurable logging to
|
||||||
|
users of their library. Best practice for use in a library is to always disable all output for your logger
|
||||||
|
by default and to provide a public Logger instance that consumers of your library can configure. Like so:
|
||||||
|
|
||||||
|
package yourlib
|
||||||
|
|
||||||
|
import "gopkg.in/inconshreveable/log15.v2"
|
||||||
|
|
||||||
|
var Log = log.New()
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
Log.SetHandler(log.DiscardHandler())
|
||||||
|
}
|
||||||
|
|
||||||
|
Users of your library may then enable it if they like:
|
||||||
|
|
||||||
|
import "gopkg.in/inconshreveable/log15.v2"
|
||||||
|
import "example.com/yourlib"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
handler := // custom handler setup
|
||||||
|
yourlib.Log.SetHandler(handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
Best practices attaching logger context
|
||||||
|
|
||||||
|
The ability to attach context to a logger is a powerful one. Where should you do it and why?
|
||||||
|
I favor embedding a Logger directly into any persistent object in my application and adding
|
||||||
|
unique, tracing context keys to it. For instance, imagine I am writing a web browser:
|
||||||
|
|
||||||
|
type Tab struct {
|
||||||
|
url string
|
||||||
|
render *RenderingContext
|
||||||
|
// ...
|
||||||
|
|
||||||
|
Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTab(url string) *Tab {
|
||||||
|
return &Tab {
|
||||||
|
// ...
|
||||||
|
url: url,
|
||||||
|
|
||||||
|
Logger: log.New("url", url),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
When a new tab is created, I assign a logger to it with the url of
|
||||||
|
the tab as context so it can easily be traced through the logs.
|
||||||
|
Now, whenever we perform any operation with the tab, we'll log with its
|
||||||
|
embedded logger and it will include the tab title automatically:
|
||||||
|
|
||||||
|
tab.Debug("moved position", "idx", tab.idx)
|
||||||
|
|
||||||
|
There's only one problem. What if the tab url changes? We could
|
||||||
|
use log.Lazy to make sure the current url is always written, but that
|
||||||
|
would mean that we couldn't trace a tab's full lifetime through our
|
||||||
|
logs after the user navigate to a new URL.
|
||||||
|
|
||||||
|
Instead, think about what values to attach to your loggers the
|
||||||
|
same way you think about what to use as a key in a SQL database schema.
|
||||||
|
If it's possible to use a natural key that is unique for the lifetime of the
|
||||||
|
object, do so. But otherwise, log15's ext package has a handy RandId
|
||||||
|
function to let you generate what you might call "surrogate keys"
|
||||||
|
They're just random hex identifiers to use for tracing. Back to our
|
||||||
|
Tab example, we would prefer to set up our Logger like so:
|
||||||
|
|
||||||
|
import logext "gopkg.in/inconshreveable/log15.v2/ext"
|
||||||
|
|
||||||
|
t := &Tab {
|
||||||
|
// ...
|
||||||
|
url: url,
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
|
||||||
|
return t
|
||||||
|
|
||||||
|
Now we'll have a unique traceable identifier even across loading new urls, but
|
||||||
|
we'll still be able to see the tab's current url in the log messages.
|
||||||
|
|
||||||
|
Must
|
||||||
|
|
||||||
|
For all Handler functions which can return an error, there is a version of that
|
||||||
|
function which will return no error but panics on failure. They are all available
|
||||||
|
on the Must object. For example:
|
||||||
|
|
||||||
|
log.Must.FileHandler("/path", log.JsonFormat)
|
||||||
|
log.Must.NetHandler("tcp", ":1234", log.JsonFormat)
|
||||||
|
|
||||||
|
Inspiration and Credit
|
||||||
|
|
||||||
|
All of the following excellent projects inspired the design of this library:
|
||||||
|
|
||||||
|
code.google.com/p/log4go
|
||||||
|
|
||||||
|
github.com/op/go-logging
|
||||||
|
|
||||||
|
github.com/technoweenie/grohl
|
||||||
|
|
||||||
|
github.com/Sirupsen/logrus
|
||||||
|
|
||||||
|
github.com/kr/logfmt
|
||||||
|
|
||||||
|
github.com/spacemonkeygo/spacelog
|
||||||
|
|
||||||
|
golang's stdlib, notably io and net/http
|
||||||
|
|
||||||
|
The Name
|
||||||
|
|
||||||
|
https://xkcd.com/927/
|
||||||
|
|
||||||
|
*/
|
||||||
|
package log15
|
|
@ -0,0 +1,130 @@
|
||||||
|
package ext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
log "github.com/inconshreveable/log15"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EscalateErrHandler wraps another handler and passes all records through
|
||||||
|
// unchanged except if the logged context contains a non-nil error
|
||||||
|
// value in its context. In that case, the record's level is raised
|
||||||
|
// to LvlError unless it was already more serious (LvlCrit).
|
||||||
|
//
|
||||||
|
// This allows you to log the result of all functions for debugging
|
||||||
|
// and still capture error conditions when in production with a single
|
||||||
|
// log line. As an example, the following the log record will be written
|
||||||
|
// out only if there was an error writing a value to redis:
|
||||||
|
//
|
||||||
|
// logger := logext.EscalateErrHandler(
|
||||||
|
// log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler))
|
||||||
|
//
|
||||||
|
// reply, err := redisConn.Do("SET", "foo", "bar")
|
||||||
|
// logger.Debug("Wrote value to redis", "reply", reply, "err", err)
|
||||||
|
// if err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
func EscalateErrHandler(h log.Handler) log.Handler {
|
||||||
|
return log.FuncHandler(func(r *log.Record) error {
|
||||||
|
if r.Lvl > log.LvlError {
|
||||||
|
for i := 1; i < len(r.Ctx); i++ {
|
||||||
|
if v, ok := r.Ctx[i].(error); ok && v != nil {
|
||||||
|
r.Lvl = log.LvlError
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return h.Log(r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpeculativeHandler is a handler for speculative logging. It
// keeps a ring buffer of the given size full of the last events
// logged into it. When Flush is called, all buffered log records
// are written to the wrapped handler. This is extremely useful for
// continuously capturing debug level output, but only flushing those
// log records if an exceptional condition is encountered.
func SpeculativeHandler(size int, h log.Handler) *Speculative {
	return &Speculative{
		handler: h,
		recs:    make([]*log.Record, size),
	}
}

// Speculative is the ring-buffering handler returned by SpeculativeHandler.
type Speculative struct {
	mu      sync.Mutex    // guards idx, recs and full
	idx     int           // next write position in recs
	recs    []*log.Record // fixed-size ring buffer of buffered records
	handler log.Handler   // destination written to by Flush
	full    bool          // true once the ring has wrapped at least once
}

// Log stores the record in the ring buffer, overwriting the oldest
// entry once the buffer has wrapped. It never returns an error.
func (h *Speculative) Log(r *log.Record) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.recs[h.idx] = r
	h.idx = (h.idx + 1) % len(h.recs)
	// once idx wraps back to 0 the buffer has been filled completely
	h.full = h.full || h.idx == 0
	return nil
}

// Flush writes all buffered records, oldest first, to the wrapped
// handler and resets the buffer.
func (h *Speculative) Flush() {
	recs := make([]*log.Record, 0)
	// copy the buffered records out under the lock
	func() {
		h.mu.Lock()
		defer h.mu.Unlock()
		if h.full {
			// the oldest records sit at and after the write cursor
			recs = append(recs, h.recs[h.idx:]...)
		}
		recs = append(recs, h.recs[:h.idx]...)

		// reset state
		h.full = false
		h.idx = 0
	}()

	// don't hold the lock while we flush to the wrapped handler
	for _, r := range recs {
		h.handler.Log(r)
	}
}
|
||||||
|
|
||||||
|
// HotSwapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
// HotSwapHandler is the same functionality
// used to implement the SetHandler method for the default
// implementation of Logger.
func HotSwapHandler(h log.Handler) *HotSwap {
	hs := new(HotSwap)
	hs.Swap(h)
	return hs
}

// HotSwap holds the currently installed handler.
type HotSwap struct {
	// handler is an unsafe.Pointer to a log.Handler interface value,
	// read and written via sync/atomic (see Log and Swap).
	handler unsafe.Pointer
}

// Log forwards the record to the currently installed handler.
func (h *HotSwap) Log(r *log.Record) error {
	return (*(*log.Handler)(atomic.LoadPointer(&h.handler))).Log(r)
}

// Swap atomically installs newHandler as the destination for
// subsequent Log calls. It stores a pointer to the parameter, so each
// call publishes a fresh interface value.
func (h *HotSwap) Swap(newHandler log.Handler) {
	atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
}
|
||||||
|
|
||||||
|
// FatalHandler makes critical errors exit the program
// immediately, much like the log.Fatal* methods from the
// standard log package
func FatalHandler(h log.Handler) log.Handler {
	return log.FuncHandler(func(r *log.Record) error {
		// write the record first so the fatal message is not lost
		err := h.Log(r)
		if r.Lvl == log.LvlCrit {
			// os.Exit terminates immediately; deferred functions do not run
			os.Exit(1)
		}
		return err
	})
}
|
|
@ -0,0 +1,47 @@
|
||||||
|
package ext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// r is the package-wide PRNG, seeded once at load time and safe for
// concurrent use via lockedSource.
var r = rand.New(&lockedSource{src: rand.NewSource(time.Now().Unix())})

// RandId creates a random identifier of the requested length.
// Useful for assigning mostly-unique identifiers for logging
// and identification that are unlikely to collide because of
// short lifespan or low set cardinality. The result is a lowercase
// hex string of 2*idlen characters.
func RandId(idlen int) string {
	raw := make([]byte, idlen)
	var chunk uint32
	for pos := range raw {
		// draw 32 fresh random bits once every 4 output bytes
		shift := 8 * uint(pos%4)
		if shift == 0 {
			chunk = r.Uint32()
		}
		raw[pos] = byte(chunk >> shift)
	}
	return fmt.Sprintf("%x", raw)
}

// lockedSource is a wrapper to allow a rand.Source to be used
// concurrently (same type as the one used internally in math/rand).
type lockedSource struct {
	lk  sync.Mutex
	src rand.Source
}

// Int63 returns the next value from the wrapped source under the lock.
func (r *lockedSource) Int63() int64 {
	r.lk.Lock()
	defer r.lk.Unlock()
	return r.src.Int63()
}

// Seed reseeds the wrapped source under the lock.
func (r *lockedSource) Seed(seed int64) {
	r.lk.Lock()
	defer r.lk.Unlock()
	r.src.Seed(seed)
}
|
|
@ -0,0 +1,257 @@
|
||||||
|
package log15
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Layout constants shared by the formatters below.
const (
	timeFormat     = "2006-01-02T15:04:05-0700" // full timestamp used by logfmt/JSON output
	termTimeFormat = "01-02|15:04:05"           // terse timestamp used by TerminalFormat
	floatFormat    = 'f'                        // strconv verb used when formatting floats
	termMsgJust    = 40                         // column to which short terminal messages are padded
)

// Format renders a Record into the byte representation a handler writes.
type Format interface {
	Format(r *Record) []byte
}

// FormatFunc returns a new Format object which uses
// the given function to perform record formatting.
func FormatFunc(f func(*Record) []byte) Format {
	return formatFunc(f)
}

// formatFunc adapts a plain function to the Format interface.
type formatFunc func(*Record) []byte

// Format implements Format by invoking the wrapped function.
func (f formatFunc) Format(r *Record) []byte {
	return f(r)
}
|
||||||
|
|
||||||
|
// TerminalFormat formats log records optimized for human readability on
// a terminal with color-coded level output and terser human friendly timestamp.
// This format should only be used for interactive programs or while developing.
//
//     [LEVEL] [TIME] MESSAGE key=value key=value ...
//
// Example:
//
//     [DBUG] [May 16 20:58:45] remove route ns=haproxy addr=127.0.0.1:50002
//
func TerminalFormat() Format {
	return FormatFunc(func(r *Record) []byte {
		// pick an ANSI color per level; 0 means "no color"
		var color = 0
		switch r.Lvl {
		case LvlCrit:
			color = 35
		case LvlError:
			color = 31
		case LvlWarn:
			color = 33
		case LvlInfo:
			color = 32
		case LvlDebug:
			color = 36
		}

		b := &bytes.Buffer{}
		lvl := strings.ToUpper(r.Lvl.String())
		if color > 0 {
			fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
		} else {
			fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
		}

		// try to justify the log output for short messages
		if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
		}

		// print the keys logfmt style
		logfmt(b, r.Ctx, color)
		return b.Bytes()
	})
}
|
||||||
|
|
||||||
|
// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
// format for key/value pairs.
//
// For more details see: http://godoc.org/github.com/kr/logfmt
//
func LogfmtFormat() Format {
	return FormatFunc(func(r *Record) []byte {
		// built-in fields come first, followed by the user context
		common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
		buf := &bytes.Buffer{}
		logfmt(buf, append(common, r.Ctx...), 0)
		return buf.Bytes()
	})
}

// logfmt writes the key/value pairs in ctx to buf as space-separated
// key=value tokens, terminated by a newline. A non-zero color wraps
// each key in the corresponding ANSI escape sequence.
func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
	for i := 0; i < len(ctx); i += 2 {
		if i != 0 {
			buf.WriteByte(' ')
		}

		k, ok := ctx[i].(string)
		v := formatLogfmtValue(ctx[i+1])
		if !ok {
			// Non-string key: report it under errorKey.
			// NOTE(review): k is the empty string on this path, so the
			// reported value is `""` rather than the offending key —
			// confirm whether formatLogfmtValue(ctx[i]) was intended.
			k, v = errorKey, formatLogfmtValue(k)
		}

		// XXX: we should probably check that all of your key bytes aren't invalid
		if color > 0 {
			fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
		} else {
			fmt.Fprintf(buf, "%s=%s", k, v)
		}
	}

	buf.WriteByte('\n')
}
|
||||||
|
|
||||||
|
// JsonFormat formats log records as JSON objects separated by newlines.
// It is the equivalent of JsonFormatEx(false, true).
func JsonFormat() Format {
	return JsonFormatEx(false, true)
}
|
||||||
|
|
||||||
|
// JsonFormatEx formats log records as JSON objects. If pretty is true,
|
||||||
|
// records will be pretty-printed. If lineSeparated is true, records
|
||||||
|
// will be logged with a new line between each record.
|
||||||
|
func JsonFormatEx(pretty, lineSeparated bool) Format {
|
||||||
|
jsonMarshal := json.Marshal
|
||||||
|
if pretty {
|
||||||
|
jsonMarshal = func(v interface{}) ([]byte, error) {
|
||||||
|
return json.MarshalIndent(v, "", " ")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return FormatFunc(func(r *Record) []byte {
|
||||||
|
props := make(map[string]interface{})
|
||||||
|
|
||||||
|
props[r.KeyNames.Time] = r.Time
|
||||||
|
props[r.KeyNames.Lvl] = r.Lvl.String()
|
||||||
|
props[r.KeyNames.Msg] = r.Msg
|
||||||
|
|
||||||
|
for i := 0; i < len(r.Ctx); i += 2 {
|
||||||
|
k, ok := r.Ctx[i].(string)
|
||||||
|
if !ok {
|
||||||
|
props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
|
||||||
|
}
|
||||||
|
props[k] = formatJsonValue(r.Ctx[i+1])
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := jsonMarshal(props)
|
||||||
|
if err != nil {
|
||||||
|
b, _ = jsonMarshal(map[string]string{
|
||||||
|
errorKey: err.Error(),
|
||||||
|
})
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
if lineSeparated {
|
||||||
|
b = append(b, '\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
return b
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatShared normalizes a context value into a form shared by the
// logfmt and JSON formatters: times are rendered with timeFormat,
// errors via Error(), Stringers via String(); everything else is
// returned unchanged.
func formatShared(value interface{}) (result interface{}) {
	defer func() {
		if err := recover(); err != nil {
			// Calling Error()/String() on a typed nil pointer can panic;
			// render such values as "nil" instead of crashing the logger.
			// Any other panic is re-raised untouched.
			if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
				result = "nil"
			} else {
				panic(err)
			}
		}
	}()

	switch v := value.(type) {
	case time.Time:
		return v.Format(timeFormat)

	case error:
		return v.Error()

	case fmt.Stringer:
		return v.String()

	default:
		return v
	}
}
|
||||||
|
|
||||||
|
func formatJsonValue(value interface{}) interface{} {
|
||||||
|
value = formatShared(value)
|
||||||
|
switch value.(type) {
|
||||||
|
case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
|
||||||
|
return value
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%+v", value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatLogfmtValue formats a value for serialization in logfmt output.
// Floats are rendered with 3 digits of precision; strings (and any
// value stringified with %+v) are quoted/escaped via escapeString.
func formatLogfmtValue(value interface{}) string {
	if value == nil {
		return "nil"
	}

	// normalize time.Time / error / Stringer values first
	value = formatShared(value)
	switch v := value.(type) {
	case bool:
		return strconv.FormatBool(v)
	case float32:
		return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
	case float64:
		return strconv.FormatFloat(v, floatFormat, 3, 64)
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		return fmt.Sprintf("%d", value)
	case string:
		return escapeString(v)
	default:
		return escapeString(fmt.Sprintf("%+v", value))
	}
}
|
||||||
|
|
||||||
|
// escapeString renders s for logfmt output: backslashes, quotes and
// the control characters \n, \r and \t are backslash-escaped. The
// result is wrapped in double quotes only when s contains a space,
// '=', '"', or any byte <= ' '.
func escapeString(s string) string {
	var (
		quoted bool
		buf    bytes.Buffer
	)
	buf.WriteByte('"')
	for _, c := range s {
		// characters that force the quoted form
		if c <= ' ' || c == '=' || c == '"' {
			quoted = true
		}
		switch c {
		case '\\', '"':
			buf.WriteByte('\\')
			buf.WriteByte(byte(c))
		case '\n':
			buf.WriteByte('\\')
			buf.WriteByte('n')
		case '\r':
			buf.WriteByte('\\')
			buf.WriteByte('r')
		case '\t':
			buf.WriteByte('\\')
			buf.WriteByte('t')
		default:
			buf.WriteRune(c)
		}
	}
	buf.WriteByte('"')
	out := buf.Bytes()
	if quoted {
		return string(out)
	}
	// no quoting needed: strip the surrounding quotes written above
	return string(out[1 : len(out)-1])
}
|
|
@ -0,0 +1,371 @@
|
||||||
|
package log15
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/inconshreveable/log15/stack"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Logger prints its log records by writing to a Handler.
// The Handler interface defines where and how log records are written.
// Handlers are composable, providing you great flexibility in combining
// them to achieve the logging structure that suits your applications.
type Handler interface {
	Log(r *Record) error
}

// FuncHandler returns a Handler that logs records with the given
// function.
func FuncHandler(fn func(r *Record) error) Handler {
	return funcHandler(fn)
}

// funcHandler adapts a plain function to the Handler interface.
type funcHandler func(r *Record) error

// Log implements Handler by invoking the wrapped function.
func (h funcHandler) Log(r *Record) error {
	return h(r)
}

// StreamHandler writes log records to an io.Writer
// with the given format. StreamHandler can be used
// to easily begin writing log records to other
// outputs.
//
// StreamHandler wraps itself with LazyHandler and SyncHandler
// to evaluate Lazy objects and perform safe concurrent writes.
func StreamHandler(wr io.Writer, fmtr Format) Handler {
	h := FuncHandler(func(r *Record) error {
		_, err := wr.Write(fmtr.Format(r))
		return err
	})
	return LazyHandler(SyncHandler(h))
}
|
||||||
|
|
||||||
|
// SyncHandler can be wrapped around a handler to guarantee that
|
||||||
|
// only a single Log operation can proceed at a time. It's necessary
|
||||||
|
// for thread-safe concurrent writes.
|
||||||
|
func SyncHandler(h Handler) Handler {
|
||||||
|
var mu sync.Mutex
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
defer mu.Unlock()
|
||||||
|
mu.Lock()
|
||||||
|
return h.Log(r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileHandler returns a handler which writes log records to the given file
// using the given format. If the path
// already exists, FileHandler will append to the given file. If it does not,
// FileHandler will create the file with mode 0644.
func FileHandler(path string, fmtr Format) (Handler, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	return closingHandler{f, StreamHandler(f, fmtr)}, nil
}

// NetHandler opens a socket to the given address and writes records
// over the connection.
func NetHandler(network, addr string, fmtr Format) (Handler, error) {
	conn, err := net.Dial(network, addr)
	if err != nil {
		return nil, err
	}

	return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
}

// XXX: closingHandler is essentially unused at the moment
// it's meant for a future time when the Handler interface supports
// a possible Close() operation
type closingHandler struct {
	io.WriteCloser
	Handler
}

// Close closes the underlying writer.
func (h *closingHandler) Close() error {
	return h.WriteCloser.Close()
}
|
||||||
|
|
||||||
|
// CallerFileHandler returns a Handler that adds the line number and file of
// the calling function to the context with key "caller".
func CallerFileHandler(h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		// resolve the program counter recorded at the logging call site
		call := stack.Call(r.CallPC[0])
		r.Ctx = append(r.Ctx, "caller", fmt.Sprint(call))
		return h.Log(r)
	})
}

// CallerFuncHandler returns a Handler that adds the calling function name to
// the context with key "fn".
func CallerFuncHandler(h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		call := stack.Call(r.CallPC[0])
		r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", call))
		return h.Log(r)
	})
}

// CallerStackHandler returns a Handler that adds a stack trace to the context
// with key "stack". The stack trace is formatted as a space separated list of
// call sites inside matching []'s. The most recent call site is listed first.
// Each call site is formatted according to format. See the documentation of
// log15/stack.Call.Format for the list of supported formats.
func CallerStackHandler(format string, h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		// trim frames below the logging call site and runtime frames
		s := stack.Callers().
			TrimBelow(stack.Call(r.CallPC[0])).
			TrimRuntime()
		if len(s) > 0 {
			buf := &bytes.Buffer{}
			buf.WriteByte('[')
			for i, pc := range s {
				if i > 0 {
					buf.WriteByte(' ')
				}
				fmt.Fprintf(buf, format, pc)
			}
			buf.WriteByte(']')
			r.Ctx = append(r.Ctx, "stack", buf.String())
		}
		return h.Log(r)
	})
}
|
||||||
|
|
||||||
|
// FilterHandler returns a Handler that only writes records to the
|
||||||
|
// wrapped Handler if the given function evaluates true. For example,
|
||||||
|
// to only log records where the 'err' key is not nil:
|
||||||
|
//
|
||||||
|
// logger.SetHandler(FilterHandler(func(r *Record) bool {
|
||||||
|
// for i := 0; i < len(r.Ctx); i += 2 {
|
||||||
|
// if r.Ctx[i] == "err" {
|
||||||
|
// return r.Ctx[i+1] != nil
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// return false
|
||||||
|
// }, h))
|
||||||
|
//
|
||||||
|
func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
if fn(r) {
|
||||||
|
return h.Log(r)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchFilterHandler returns a Handler that only writes records
// to the wrapped Handler if the given key in the logged
// context matches the value. For example, to only log records
// from your ui package:
//
//    log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
//
func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
	return FilterHandler(func(r *Record) (pass bool) {
		// built-in fields are matched via their configured key names
		switch key {
		case r.KeyNames.Lvl:
			return r.Lvl == value
		case r.KeyNames.Time:
			return r.Time == value
		case r.KeyNames.Msg:
			return r.Msg == value
		}

		// otherwise scan the user-supplied key/value pairs
		for i := 0; i < len(r.Ctx); i += 2 {
			if r.Ctx[i] == key {
				return r.Ctx[i+1] == value
			}
		}
		return false
	}, h)
}

// LvlFilterHandler returns a Handler that only writes
// records which are less than the given verbosity
// level to the wrapped Handler. For example, to only
// log Error/Crit records:
//
//     log.LvlFilterHandler(log.Error, log.StdoutHandler)
//
func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
	return FilterHandler(func(r *Record) (pass bool) {
		// levels are ordered most-severe-first, so <= means "at least as severe"
		return r.Lvl <= maxLvl
	}, h)
}

// A MultiHandler dispatches any write to each of its handlers.
// This is useful for writing different types of log information
// to different locations. For example, to log to a file and
// standard error:
//
//     log.MultiHandler(
//         log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
//         log.StderrHandler)
//
func MultiHandler(hs ...Handler) Handler {
	return FuncHandler(func(r *Record) error {
		for _, h := range hs {
			// what to do about failures?
			h.Log(r)
		}
		return nil
	})
}
|
||||||
|
|
||||||
|
// A FailoverHandler writes all log records to the first handler
|
||||||
|
// specified, but will failover and write to the second handler if
|
||||||
|
// the first handler has failed, and so on for all handlers specified.
|
||||||
|
// For example you might want to log to a network socket, but failover
|
||||||
|
// to writing to a file if the network fails, and then to
|
||||||
|
// standard out if the file write fails:
|
||||||
|
//
|
||||||
|
// log.FailoverHandler(
|
||||||
|
// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
|
||||||
|
// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
|
||||||
|
// log.StdoutHandler)
|
||||||
|
//
|
||||||
|
// All writes that do not go to the first handler will add context with keys of
|
||||||
|
// the form "failover_err_{idx}" which explain the error encountered while
|
||||||
|
// trying to write to the handlers before them in the list.
|
||||||
|
func FailoverHandler(hs ...Handler) Handler {
|
||||||
|
return FuncHandler(func(r *Record) error {
|
||||||
|
var err error
|
||||||
|
for i, h := range hs {
|
||||||
|
err = h.Log(r)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelHandler writes all records to the given channel.
// It blocks if the channel is full. Useful for async processing
// of log messages, it's used by BufferedHandler.
func ChannelHandler(recs chan<- *Record) Handler {
	return FuncHandler(func(r *Record) error {
		recs <- r
		return nil
	})
}

// BufferedHandler writes all records to a buffered
// channel of the given size which flushes into the wrapped
// handler whenever it is available for writing. Since these
// writes happen asynchronously, all writes to a BufferedHandler
// never return an error and any errors from the wrapped handler are ignored.
//
// NOTE(review): the draining goroutine runs for the life of the
// process — recs is never closed, so there is no way to stop it.
func BufferedHandler(bufSize int, h Handler) Handler {
	recs := make(chan *Record, bufSize)
	go func() {
		for m := range recs {
			// errors from the wrapped handler are deliberately dropped
			_ = h.Log(m)
		}
	}()
	return ChannelHandler(recs)
}
|
||||||
|
|
||||||
|
// LazyHandler writes all values to the wrapped handler after evaluating
// any lazy functions in the record's context. It is already wrapped
// around StreamHandler and SyslogHandler in this library, you'll only need
// it if you write your own Handler.
func LazyHandler(h Handler) Handler {
	return FuncHandler(func(r *Record) error {
		// go through the values (odd indices) and reassign
		// the values of any lazy fn to the result of its execution
		hadErr := false
		for i := 1; i < len(r.Ctx); i += 2 {
			lz, ok := r.Ctx[i].(Lazy)
			if ok {
				v, err := evaluateLazy(lz)
				if err != nil {
					// keep the error as the value so it shows up in output
					hadErr = true
					r.Ctx[i] = err
				} else {
					if cs, ok := v.(stack.Trace); ok {
						// trim the trace to the logging call site
						v = cs.TrimBelow(stack.Call(r.CallPC[0])).
							TrimRuntime()
					}
					r.Ctx[i] = v
				}
			}
		}

		// flag the record so misbehaving lazy values are visible
		if hadErr {
			r.Ctx = append(r.Ctx, errorKey, "bad lazy")
		}

		return h.Log(r)
	})
}
|
||||||
|
|
||||||
|
func evaluateLazy(lz Lazy) (interface{}, error) {
|
||||||
|
t := reflect.TypeOf(lz.Fn)
|
||||||
|
|
||||||
|
if t.Kind() != reflect.Func {
|
||||||
|
return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.NumIn() > 0 {
|
||||||
|
return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.NumOut() == 0 {
|
||||||
|
return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
value := reflect.ValueOf(lz.Fn)
|
||||||
|
results := value.Call([]reflect.Value{})
|
||||||
|
if len(results) == 1 {
|
||||||
|
return results[0].Interface(), nil
|
||||||
|
} else {
|
||||||
|
values := make([]interface{}, len(results))
|
||||||
|
for i, v := range results {
|
||||||
|
values[i] = v.Interface()
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardHandler reports success for all writes but does nothing.
// It is useful for dynamically disabling logging at runtime via
// a Logger's SetHandler method.
func DiscardHandler() Handler {
	return FuncHandler(func(r *Record) error {
		return nil
	})
}

// The Must object provides the following Handler creation functions
// which instead of returning an error parameter only return a Handler
// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
var Must muster

// must panics if err is non-nil, otherwise returns h unchanged.
func must(h Handler, err error) Handler {
	if err != nil {
		panic(err)
	}
	return h
}

// muster is the receiver type behind the Must object.
type muster struct{}

// FileHandler is the panic-on-failure variant of the package-level FileHandler.
func (m muster) FileHandler(path string, fmtr Format) Handler {
	return must(FileHandler(path, fmtr))
}

// NetHandler is the panic-on-failure variant of the package-level NetHandler.
func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
	return must(NetHandler(network, addr, fmtr))
}
|
|
@ -0,0 +1,26 @@
|
||||||
|
// +build appengine
|
||||||
|
|
||||||
|
package log15
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
// This variant stores the current handler in an interface field
// guarded by an RWMutex.
type swapHandler struct {
	handler interface{} // always holds a Handler; see Swap
	lock    sync.RWMutex
}

// Log forwards the record to the currently installed handler.
func (h *swapHandler) Log(r *Record) error {
	h.lock.RLock()
	defer h.lock.RUnlock()

	return h.handler.(Handler).Log(r)
}

// Swap replaces the wrapped handler under the write lock.
func (h *swapHandler) Swap(newHandler Handler) {
	h.lock.Lock()
	defer h.lock.Unlock()

	h.handler = newHandler
}
|
|
@ -0,0 +1,22 @@
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package log15
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
type swapHandler struct {
	// handler is an unsafe.Pointer to a Handler interface value,
	// read and written via sync/atomic (see Log and Swap).
	handler unsafe.Pointer
}

// Log forwards the record to the currently installed handler.
func (h *swapHandler) Log(r *Record) error {
	return (*(*Handler)(atomic.LoadPointer(&h.handler))).Log(r)
}

// Swap atomically installs newHandler as the destination for
// subsequent Log calls. It stores a pointer to the parameter, so
// each call publishes a fresh interface value.
func (h *swapHandler) Swap(newHandler Handler) {
	atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
}
|
|
@ -0,0 +1,201 @@
|
||||||
|
package log15
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key names used for a record's built-in fields when formatting.
const timeKey = "t"
const lvlKey = "lvl"
const msgKey = "msg"

// errorKey marks context entries the library itself generated to
// report a logging misuse (e.g. a non-string key).
const errorKey = "LOG15_ERROR"
|
||||||
|
|
||||||
|
// Lvl is the verbosity level of a log record. Lower values are more
// severe: LvlCrit is 0 and LvlDebug is 4.
type Lvl int

const (
	LvlCrit Lvl = iota
	LvlError
	LvlWarn
	LvlInfo
	LvlDebug
)

// lvlNames maps each Lvl to its canonical four-letter name, indexed by level.
var lvlNames = [...]string{"crit", "eror", "warn", "info", "dbug"}

// String returns the name of a Lvl. It panics on values outside the
// declared range.
func (l Lvl) String() string {
	if l < LvlCrit || l > LvlDebug {
		panic("bad level")
	}
	return lvlNames[l]
}

// lvlsByName maps the accepted spellings of each level name to its Lvl.
var lvlsByName = map[string]Lvl{
	"debug": LvlDebug,
	"dbug":  LvlDebug,
	"info":  LvlInfo,
	"warn":  LvlWarn,
	"error": LvlError,
	"eror":  LvlError,
	"crit":  LvlCrit,
}

// LvlFromString returns the appropriate Lvl from a string name.
// Useful for parsing command line args and configuration files.
func LvlFromString(lvlString string) (Lvl, error) {
	if lvl, ok := lvlsByName[lvlString]; ok {
		return lvl, nil
	}
	return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
}
|
||||||
|
|
||||||
|
// A Record is what a Logger asks its handler to write
type Record struct {
	Time time.Time
	Lvl  Lvl
	Msg  string
	// Ctx is an alternating key/value list; logger.write builds it via
	// newContext/normalize so its length is always even.
	Ctx []interface{}
	// CallPC holds the PC of the logging call site, captured by
	// runtime.Callers in logger.write.
	CallPC   [1]uintptr
	KeyNames RecordKeyNames
}
|
||||||
|
|
||||||
|
// RecordKeyNames tells formatters which key names to use for a
// Record's built-in time, message and level fields.
type RecordKeyNames struct {
	Time string
	Msg  string
	Lvl  string
}
|
||||||
|
|
||||||
|
// A Logger writes key/value pairs to a Handler
type Logger interface {
	// New returns a new Logger that has this logger's context plus the given context
	New(ctx ...interface{}) Logger

	// SetHandler updates the logger to write records to the specified handler.
	SetHandler(h Handler)

	// Log a message at the given level with context key/value pairs.
	// ctx is an alternating key/value list; odd-length lists are
	// repaired by the logger (see normalize).
	Debug(msg string, ctx ...interface{})
	Info(msg string, ctx ...interface{})
	Warn(msg string, ctx ...interface{})
	Error(msg string, ctx ...interface{})
	Crit(msg string, ctx ...interface{})
}
|
||||||
|
|
||||||
|
// logger is the concrete Logger implementation: an accumulated context
// plus a dynamically swappable handler.
type logger struct {
	ctx []interface{}
	h   *swapHandler
}
|
||||||
|
|
||||||
|
// write assembles a Record from msg/lvl and the logger's accumulated
// context, then hands it to the handler. The handler's error is
// discarded.
func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
	r := Record{
		Time: time.Now(),
		Lvl:  lvl,
		Msg:  msg,
		Ctx:  newContext(l.ctx, ctx),
		KeyNames: RecordKeyNames{
			Time: timeKey,
			Msg:  msgKey,
			Lvl:  lvlKey,
		},
	}
	// Skip 3 frames (Callers itself, write, and the exported wrapper)
	// so the recorded PC is the call site in client code. Every caller
	// of write must therefore sit exactly one frame above it.
	runtime.Callers(3, r.CallPC[:])
	l.h.Log(&r)
}
|
||||||
|
|
||||||
|
func (l *logger) New(ctx ...interface{}) Logger {
|
||||||
|
child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
|
||||||
|
child.SetHandler(l.h)
|
||||||
|
return child
|
||||||
|
}
|
||||||
|
|
||||||
|
func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
|
||||||
|
normalizedSuffix := normalize(suffix)
|
||||||
|
newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
|
||||||
|
n := copy(newCtx, prefix)
|
||||||
|
copy(newCtx[n:], normalizedSuffix)
|
||||||
|
return newCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug logs msg at LvlDebug with the given context.
func (l *logger) Debug(msg string, ctx ...interface{}) {
	l.write(msg, LvlDebug, ctx)
}

// Info logs msg at LvlInfo with the given context.
func (l *logger) Info(msg string, ctx ...interface{}) {
	l.write(msg, LvlInfo, ctx)
}

// Warn logs msg at LvlWarn with the given context.
func (l *logger) Warn(msg string, ctx ...interface{}) {
	l.write(msg, LvlWarn, ctx)
}

// Error logs msg at LvlError with the given context.
func (l *logger) Error(msg string, ctx ...interface{}) {
	l.write(msg, LvlError, ctx)
}

// Crit logs msg at LvlCrit with the given context.
func (l *logger) Crit(msg string, ctx ...interface{}) {
	l.write(msg, LvlCrit, ctx)
}
|
||||||
|
|
||||||
|
// SetHandler updates the logger to write records to the specified
// handler; the swap is safe against concurrent Log calls.
func (l *logger) SetHandler(h Handler) {
	l.h.Swap(h)
}
|
||||||
|
|
||||||
|
func normalize(ctx []interface{}) []interface{} {
|
||||||
|
// if the caller passed a Ctx object, then expand it
|
||||||
|
if len(ctx) == 1 {
|
||||||
|
if ctxMap, ok := ctx[0].(Ctx); ok {
|
||||||
|
ctx = ctxMap.toArray()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ctx needs to be even because it's a series of key/value pairs
|
||||||
|
// no one wants to check for errors on logging functions,
|
||||||
|
// so instead of erroring on bad input, we'll just make sure
|
||||||
|
// that things are the right length and users can fix bugs
|
||||||
|
// when they see the output looks wrong
|
||||||
|
if len(ctx)%2 != 0 {
|
||||||
|
ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lazy allows you to defer calculation of a logged value that is expensive
// to compute until it is certain that it must be evaluated with the given filters.
//
// Lazy may also be used in conjunction with a Logger's New() function
// to generate a child logger which always reports the current value of changing
// state.
//
// You may wrap any function which takes no arguments to Lazy. It may return any
// number of values of any type.
type Lazy struct {
	// Fn is the deferred computation; it must be a function taking no
	// arguments.
	Fn interface{}
}
|
||||||
|
|
||||||
|
// Ctx is a map of key/value pairs to pass as context to a log function
// Use this only if you really need greater safety around the arguments you pass
// to the logging functions.
type Ctx map[string]interface{}

// toArray flattens the map into the alternating key/value slice form
// used internally by the logging functions. Iteration order follows
// Go's (randomized) map order.
func (c Ctx) toArray() []interface{} {
	flat := make([]interface{}, 0, len(c)*2)
	for key, val := range c {
		flat = append(flat, key, val)
	}
	return flat
}
|
|
@ -0,0 +1,67 @@
|
||||||
|
package log15

import (
	"os"

	"github.com/inconshreveable/log15/term"
	"github.com/mattn/go-colorable"
)

var (
	// root is the package-level logger behind New, Root and the
	// package-level logging functions; it is set up in init.
	root *logger
	// StdoutHandler and StderrHandler default to logfmt output; init
	// upgrades them to colorized terminal output when the stream is a TTY.
	StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
	StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
)
|
||||||
|
|
||||||
|
func init() {
	// Use colorized terminal output when stdout/stderr are TTYs.
	if term.IsTty(os.Stdout.Fd()) {
		StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
	}

	if term.IsTty(os.Stderr.Fd()) {
		StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
	}

	// The root logger starts with no context and writes to stdout.
	root = &logger{[]interface{}{}, new(swapHandler)}
	root.SetHandler(StdoutHandler)
}
|
||||||
|
|
||||||
|
// New returns a new logger with the given context.
// New is a convenient alias for Root().New
// The returned logger shares the root logger's handler until
// SetHandler is called on it.
func New(ctx ...interface{}) Logger {
	return root.New(ctx...)
}
|
||||||
|
|
||||||
|
// Root returns the root logger.
// Its handler defaults to StdoutHandler (see init).
func Root() Logger {
	return root
}
|
||||||
|
|
||||||
|
// The following functions bypass the exported logger methods (logger.Debug,
// etc.) to keep the call depth the same for all paths to logger.write so
// the runtime.Callers(3, ...) inside write always refers to the call site
// in client code.

// Debug is a convenient alias for Root().Debug
func Debug(msg string, ctx ...interface{}) {
	root.write(msg, LvlDebug, ctx)
}

// Info is a convenient alias for Root().Info
func Info(msg string, ctx ...interface{}) {
	root.write(msg, LvlInfo, ctx)
}

// Warn is a convenient alias for Root().Warn
func Warn(msg string, ctx ...interface{}) {
	root.write(msg, LvlWarn, ctx)
}

// Error is a convenient alias for Root().Error
func Error(msg string, ctx ...interface{}) {
	root.write(msg, LvlError, ctx)
}

// Crit is a convenient alias for Root().Crit
func Crit(msg string, ctx ...interface{}) {
	root.write(msg, LvlCrit, ctx)
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
// Syslog support is excluded on Windows and Plan 9, where the standard
// library does not provide log/syslog.
// +build !windows,!plan9

package log15

import (
	"log/syslog"
	"strings"
)
|
||||||
|
|
||||||
|
// SyslogHandler opens a connection to the system syslog daemon by calling
// syslog.New and writes all records to it.
// A failure to connect is returned as the error; sharedSyslog performs
// that check.
func SyslogHandler(tag string, fmtr Format) (Handler, error) {
	wr, err := syslog.New(syslog.LOG_INFO, tag)
	return sharedSyslog(fmtr, wr, err)
}
|
||||||
|
|
||||||
|
// SyslogNetHandler opens a connection to a log daemon over the network and
// writes all log records to it.
func SyslogNetHandler(net, addr string, tag string, fmtr Format) (Handler, error) {
	wr, err := syslog.Dial(net, addr, syslog.LOG_INFO, tag)
	return sharedSyslog(fmtr, wr, err)
}
|
||||||
|
|
||||||
|
func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
h := FuncHandler(func(r *Record) error {
|
||||||
|
var syslogFn = sysWr.Info
|
||||||
|
switch r.Lvl {
|
||||||
|
case LvlCrit:
|
||||||
|
syslogFn = sysWr.Crit
|
||||||
|
case LvlError:
|
||||||
|
syslogFn = sysWr.Err
|
||||||
|
case LvlWarn:
|
||||||
|
syslogFn = sysWr.Warning
|
||||||
|
case LvlInfo:
|
||||||
|
syslogFn = sysWr.Info
|
||||||
|
case LvlDebug:
|
||||||
|
syslogFn = sysWr.Debug
|
||||||
|
}
|
||||||
|
|
||||||
|
s := strings.TrimSpace(string(fmtr.Format(r)))
|
||||||
|
return syslogFn(s)
|
||||||
|
})
|
||||||
|
return LazyHandler(&closingHandler{sysWr, h}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SyslogHandler is the must-variant of the package-level SyslogHandler:
// the error is routed through must rather than returned.
func (m muster) SyslogHandler(tag string, fmtr Format) Handler {
	return must(SyslogHandler(tag, fmtr))
}

// SyslogNetHandler is the must-variant of the package-level
// SyslogNetHandler.
func (m muster) SyslogNetHandler(net, addr string, tag string, fmtr Format) Handler {
	return must(SyslogNetHandler(net, addr, tag, fmtr))
}
|
|
@ -0,0 +1,4 @@
|
||||||
|
.DS_Store
|
||||||
|
TODO
|
||||||
|
tmp/**/*
|
||||||
|
*.coverprofile
|
|
@ -0,0 +1,15 @@
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.3
|
||||||
|
- 1.4
|
||||||
|
- 1.5
|
||||||
|
- tip
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get -v -t ./...
|
||||||
|
- go get golang.org/x/tools/cmd/cover
|
||||||
|
- go get github.com/onsi/gomega
|
||||||
|
- go install github.com/onsi/ginkgo/ginkgo
|
||||||
|
- export PATH=$PATH:$HOME/gopath/bin
|
||||||
|
|
||||||
|
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
|
|
@ -0,0 +1,136 @@
|
||||||
|
## HEAD
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- `Skip(message)` can be used to skip the current test.
|
||||||
|
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- Ginkgo tests now fail when you `panic(nil)` (#167)
|
||||||
|
|
||||||
|
## 1.2.0 5/31/2015
|
||||||
|
|
||||||
|
Improvements
|
||||||
|
|
||||||
|
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
|
||||||
|
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
|
||||||
|
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
|
||||||
|
|
||||||
|
## 1.2.0-beta
|
||||||
|
|
||||||
|
Ginkgo now requires Go 1.4+
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
|
||||||
|
- Improved focus behavior. Now, this:
|
||||||
|
|
||||||
|
```golang
|
||||||
|
FDescribe("Some describe", func() {
|
||||||
|
It("A", func() {})
|
||||||
|
|
||||||
|
FIt("B", func() {})
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
|
||||||
|
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
|
||||||
|
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
|
||||||
|
- Improved output when an error occurs in a setup or teardown block.
|
||||||
|
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
|
||||||
|
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
|
||||||
|
- Add support for precompiled tests:
|
||||||
|
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
|
||||||
|
- The compiled `package.test` file can be run directly. This runs the tests in series.
|
||||||
|
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
|
||||||
|
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
|
||||||
|
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
|
||||||
|
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
|
||||||
|
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
|
||||||
|
- `ginkgo -notify` now works on Linux
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
|
||||||
|
- Fix tempfile leak when running in parallel
|
||||||
|
- Fix incorrect failure message when a panic occurs during a parallel test run
|
||||||
|
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
|
||||||
|
- Be more consistent about handling SIGTERM as well as SIGINT
|
||||||
|
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||||
|
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
|
||||||
|
|
||||||
|
## 1.1.0 (8/2/2014)
|
||||||
|
|
||||||
|
No changes, just dropping the beta.
|
||||||
|
|
||||||
|
## 1.1.0-beta (7/22/2014)
|
||||||
|
New Features:
|
||||||
|
|
||||||
|
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
|
||||||
|
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
|
||||||
|
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
|
||||||
|
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
|
||||||
|
- `ginkgo --failFast` aborts the test suite after the first failure.
|
||||||
|
- `ginkgo generate file_1 file_2` can take multiple file arguments.
|
||||||
|
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
|
||||||
|
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
|
||||||
|
- `ginkgo --untilItFails` no longer recompiles between attempts.
|
||||||
|
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
|
||||||
|
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
|
||||||
|
|
||||||
|
## 1.0.0 (5/24/2014)
|
||||||
|
New Features:
|
||||||
|
|
||||||
|
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
|
||||||
|
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
|
||||||
|
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
|
||||||
|
- Fix all remaining race conditions in Ginkgo's test suite.
|
||||||
|
|
||||||
|
## 1.0.0-beta (4/14/2014)
|
||||||
|
Breaking changes:
|
||||||
|
|
||||||
|
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
|
||||||
|
- Modified the Reporter interface
|
||||||
|
- `watch` is now a subcommand, not a flag.
|
||||||
|
|
||||||
|
DSL changes:
|
||||||
|
|
||||||
|
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
|
||||||
|
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
|
||||||
|
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
|
||||||
|
|
||||||
|
CLI changes:
|
||||||
|
|
||||||
|
- `watch` is now a subcommand, not a flag
|
||||||
|
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
|
||||||
|
- Additional arguments can be passed to specs. Pass them after the `--` separator
|
||||||
|
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
|
||||||
|
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
|
||||||
|
|
||||||
|
Misc:
|
||||||
|
|
||||||
|
- Start using semantic versioning
|
||||||
|
- Start maintaining changelog
|
||||||
|
|
||||||
|
Major refactor:
|
||||||
|
|
||||||
|
- Pull out Ginkgo's internal to `internal`
|
||||||
|
- Rename `example` everywhere to `spec`
|
||||||
|
- Much more!
|
|
@ -0,0 +1,20 @@
|
||||||
|
Copyright (c) 2013-2014 Onsi Fakhouri
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,115 @@
|
||||||
|
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
|
||||||
|
|
||||||
|
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
|
||||||
|
|
||||||
|
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
|
||||||
|
|
||||||
|
## Feature List
|
||||||
|
|
||||||
|
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
|
||||||
|
|
||||||
|
- Structure your BDD-style tests expressively:
|
||||||
|
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
|
||||||
|
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
|
||||||
|
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
|
||||||
|
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
|
||||||
|
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
|
||||||
|
|
||||||
|
- A comprehensive test runner that lets you:
|
||||||
|
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
|
||||||
|
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
|
||||||
|
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
|
||||||
|
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
|
||||||
|
|
||||||
|
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
|
||||||
|
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
|
||||||
|
- `ginkgo -cover` runs your tests using Golang's code coverage tool
|
||||||
|
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
|
||||||
|
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
|
||||||
|
- `ginkgo -r` runs all tests suites under the current directory
|
||||||
|
- `ginkgo -v` prints out identifying information for each tests just before it runs
|
||||||
|
|
||||||
|
And much more: run `ginkgo help` for details!
|
||||||
|
|
||||||
|
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
|
||||||
|
|
||||||
|
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
|
||||||
|
|
||||||
|
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
|
||||||
|
|
||||||
|
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
|
||||||
|
|
||||||
|
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
|
||||||
|
|
||||||
|
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
|
||||||
|
|
||||||
|
- A modular architecture that lets you easily:
|
||||||
|
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
|
||||||
|
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
|
||||||
|
|
||||||
|
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
|
||||||
|
|
||||||
|
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
|
||||||
|
|
||||||
|
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
|
||||||
|
|
||||||
|
Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
|
||||||
|
|
||||||
|
## Set Me Up!
|
||||||
|
|
||||||
|
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
|
||||||
|
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
|
||||||
|
go get github.com/onsi/gomega # fetches the matcher library
|
||||||
|
|
||||||
|
cd path/to/package/you/want/to/test
|
||||||
|
|
||||||
|
ginkgo bootstrap # set up a new ginkgo suite
|
||||||
|
ginkgo generate # will create a sample test file. edit this file and add your tests then...
|
||||||
|
|
||||||
|
go test # to run your tests
|
||||||
|
|
||||||
|
ginkgo # also runs your tests
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## I'm new to Go: What are my testing options?
|
||||||
|
|
||||||
|
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
|
||||||
|
|
||||||
|
With that said, it's great to know what your options are :)
|
||||||
|
|
||||||
|
### What Golang gives you out of the box
|
||||||
|
|
||||||
|
Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
|
||||||
|
|
||||||
|
### Matcher libraries for Golang's XUnit style tests
|
||||||
|
|
||||||
|
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
|
||||||
|
|
||||||
|
- [testify](https://github.com/stretchr/testify)
|
||||||
|
- [gocheck](http://labix.org/gocheck)
|
||||||
|
|
||||||
|
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
|
||||||
|
|
||||||
|
### BDD style testing frameworks
|
||||||
|
|
||||||
|
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
|
||||||
|
|
||||||
|
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
|
||||||
|
- [GoConvey](https://github.com/smartystreets/goconvey)
|
||||||
|
- [Goblin](https://github.com/franela/goblin)
|
||||||
|
- [Mao](https://github.com/azer/mao)
|
||||||
|
- [Zen](https://github.com/pranavraja/zen)
|
||||||
|
|
||||||
|
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
|
||||||
|
|
||||||
|
Go explore!
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Ginkgo is MIT-Licensed
|
|
@ -0,0 +1,170 @@
|
||||||
|
/*
|
||||||
|
Ginkgo accepts a number of configuration options.
|
||||||
|
|
||||||
|
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||||
|
|
||||||
|
You can also learn more via
|
||||||
|
|
||||||
|
ginkgo help
|
||||||
|
|
||||||
|
or (I kid you not):
|
||||||
|
|
||||||
|
go test -asdf
|
||||||
|
*/
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
const VERSION = "1.2.0"
|
||||||
|
|
||||||
|
// GinkgoConfigType holds the settings that control how specs are
// selected and run; Flags wires each field to a command line flag.
type GinkgoConfigType struct {
	RandomSeed        int64
	RandomizeAllSpecs bool
	FocusString       string
	SkipString        string
	SkipMeasurements  bool
	FailOnPending     bool
	FailFast          bool
	EmitSpecProgress  bool
	DryRun            bool

	// Parallel-run settings, registered only when Flags is called with
	// includeParallelFlags true.
	ParallelNode  int
	ParallelTotal int
	SyncHost      string
	StreamHost    string
}

// GinkgoConfig is the package-level instance populated by Flags.
var GinkgoConfig = GinkgoConfigType{}

// DefaultReporterConfigType holds the settings consumed by the default
// reporter; Flags wires each field to a command line flag.
type DefaultReporterConfigType struct {
	NoColor           bool
	SlowSpecThreshold float64
	NoisyPendings     bool
	Succinct          bool
	Verbose           bool
	FullTrace         bool
}

// DefaultReporterConfig is the package-level instance populated by Flags.
var DefaultReporterConfig = DefaultReporterConfigType{}
|
||||||
|
|
||||||
|
// processPrefix appends a trailing dot to a non-empty flag prefix so
// generated flag names read as "prefix.flagname"; an empty prefix is
// returned unchanged.
func processPrefix(prefix string) string {
	if prefix == "" {
		return prefix
	}
	return prefix + "."
}
|
||||||
|
|
||||||
|
// Flags registers every Ginkgo command line flag on flagSet, each name
// prefixed with prefix (plus a dot) when prefix is non-empty.
// Parallel-run flags are registered only when includeParallelFlags is
// true. Values are bound to GinkgoConfig and DefaultReporterConfig.
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
	prefix = processPrefix(prefix)
	flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
	flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together.  By default, ginkgo only randomizes the top level Describe/Context groups.")
	flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
	flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
	flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
	flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything.  Best paired with -v.")
	flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
	flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
	flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")

	if includeParallelFlags {
		flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number.  For running specs in parallel.")
		flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes.  For running specs in parallel.")
		flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
		flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
	}

	flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
	flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
}
|
||||||
|
|
||||||
|
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
||||||
|
prefix = processPrefix(prefix)
|
||||||
|
result := make([]string, 0)
|
||||||
|
|
||||||
|
if ginkgo.RandomSeed > 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.RandomizeAllSpecs {
|
||||||
|
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.SkipMeasurements {
|
||||||
|
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FailOnPending {
|
||||||
|
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FailFast {
|
||||||
|
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.DryRun {
|
||||||
|
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FocusString != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.SkipString != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.EmitSpecProgress {
|
||||||
|
result = append(result, fmt.Sprintf("--%sprogress", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.ParallelNode != 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.ParallelTotal != 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.StreamHost != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.SyncHost != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.NoColor {
|
||||||
|
result = append(result, fmt.Sprintf("--%snoColor", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.SlowSpecThreshold > 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reporter.NoisyPendings {
|
||||||
|
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.Verbose {
|
||||||
|
result = append(result, fmt.Sprintf("--%sv", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.Succinct {
|
||||||
|
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.FullTrace {
|
||||||
|
result = append(result, fmt.Sprintf("--%strace", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
|
@ -0,0 +1,98 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
Table provides a simple DSL for Ginkgo-native Table-Driven Tests
|
||||||
|
|
||||||
|
The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package table
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
DescribeTable describes a table-driven test.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
DescribeTable("a simple table",
|
||||||
|
func(x int, y int, expected bool) {
|
||||||
|
Ω(x > y).Should(Equal(expected))
|
||||||
|
},
|
||||||
|
Entry("x > y", 1, 0, true),
|
||||||
|
Entry("x == y", 0, 0, false),
|
||||||
|
Entry("x < y", 0, 1, false),
|
||||||
|
)
|
||||||
|
|
||||||
|
The first argument to `DescribeTable` is a string description.
|
||||||
|
The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It.
|
||||||
|
The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors.
|
||||||
|
|
||||||
|
The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function.
|
||||||
|
|
||||||
|
Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`.
|
||||||
|
|
||||||
|
It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
|
||||||
|
|
||||||
|
Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
|
||||||
|
*/
|
||||||
|
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||||
|
describeTable(description, itBody, entries, false, false)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
|
||||||
|
*/
|
||||||
|
func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||||
|
describeTable(description, itBody, entries, false, true)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
|
||||||
|
*/
|
||||||
|
func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||||
|
describeTable(description, itBody, entries, true, false)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
|
||||||
|
*/
|
||||||
|
func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||||
|
describeTable(description, itBody, entries, true, false)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
|
||||||
|
itBodyValue := reflect.ValueOf(itBody)
|
||||||
|
if itBodyValue.Kind() != reflect.Func {
|
||||||
|
panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
|
||||||
|
}
|
||||||
|
|
||||||
|
if pending {
|
||||||
|
ginkgo.PDescribe(description, func() {
|
||||||
|
for _, entry := range entries {
|
||||||
|
entry.generateIt(itBodyValue)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
} else if focused {
|
||||||
|
ginkgo.FDescribe(description, func() {
|
||||||
|
for _, entry := range entries {
|
||||||
|
entry.generateIt(itBodyValue)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
ginkgo.Describe(description, func() {
|
||||||
|
for _, entry := range entries {
|
||||||
|
entry.generateIt(itBodyValue)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,81 @@
|
||||||
|
package table
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
	Description string        // becomes the description of the generated It
	Parameters  []interface{} // passed positionally into the DescribeTable body function
	Pending     bool          // when true, the entry generates a PIt
	Focused     bool          // when true (and not pending), the entry generates an FIt
}
|
||||||
|
|
||||||
|
func (t TableEntry) generateIt(itBody reflect.Value) {
|
||||||
|
if t.Pending {
|
||||||
|
ginkgo.PIt(t.Description)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
values := []reflect.Value{}
|
||||||
|
for i, param := range t.Parameters {
|
||||||
|
var value reflect.Value
|
||||||
|
|
||||||
|
if param == nil {
|
||||||
|
inType := itBody.Type().In(i)
|
||||||
|
value = reflect.Zero(inType)
|
||||||
|
} else {
|
||||||
|
value = reflect.ValueOf(param)
|
||||||
|
}
|
||||||
|
|
||||||
|
values = append(values, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
body := func() {
|
||||||
|
itBody.Call(values)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.Focused {
|
||||||
|
ginkgo.FIt(t.Description, body)
|
||||||
|
} else {
|
||||||
|
ginkgo.It(t.Description, body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Entry constructs a TableEntry.
|
||||||
|
|
||||||
|
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
|
||||||
|
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
|
||||||
|
|
||||||
|
Each Entry ends up generating an individual Ginkgo It.
|
||||||
|
*/
|
||||||
|
func Entry(description string, parameters ...interface{}) TableEntry {
|
||||||
|
return TableEntry{description, parameters, false, false}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
You can focus a particular entry with FEntry. This is equivalent to FIt.
|
||||||
|
*/
|
||||||
|
func FEntry(description string, parameters ...interface{}) TableEntry {
|
||||||
|
return TableEntry{description, parameters, false, true}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
|
||||||
|
*/
|
||||||
|
func PEntry(description string, parameters ...interface{}) TableEntry {
|
||||||
|
return TableEntry{description, parameters, true, false}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
|
||||||
|
*/
|
||||||
|
func XEntry(description string, parameters ...interface{}) TableEntry {
|
||||||
|
return TableEntry{description, parameters, true, false}
|
||||||
|
}
|
|
@ -0,0 +1,182 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"go/build"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/nodot"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildBootstrapCommand() *Command {
|
||||||
|
var agouti, noDot bool
|
||||||
|
flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
|
||||||
|
flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
|
||||||
|
flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
|
||||||
|
|
||||||
|
return &Command{
|
||||||
|
Name: "bootstrap",
|
||||||
|
FlagSet: flagSet,
|
||||||
|
UsageCommand: "ginkgo bootstrap <FLAGS>",
|
||||||
|
Usage: []string{
|
||||||
|
"Bootstrap a test suite for the current package",
|
||||||
|
"Accepts the following flags:",
|
||||||
|
},
|
||||||
|
Command: func(args []string, additionalArgs []string) {
|
||||||
|
generateBootstrap(agouti, noDot)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// bootstrapText is the text/template used by `ginkgo bootstrap` to generate a
// standard suite bootstrap file.  {{.GinkgoImport}} and {{.GomegaImport}} are
// filled in by generateBootstrap with either dot-imports or plain imports,
// depending on the -nodot flag.
var bootstrapText = `package {{.Package}}_test

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}

	"testing"
)

func Test{{.FormattedName}}(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "{{.FormattedName}} Suite")
}
`
|
||||||
|
|
||||||
|
// agoutiBootstrapText is the text/template used by `ginkgo bootstrap -agouti`
// to generate a suite bootstrap file for Agouti (browser) tests: it adds the
// agouti import plus BeforeSuite/AfterSuite hooks that start and stop a
// shared WebDriver.
var agoutiBootstrapText = `package {{.Package}}_test

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"

	"testing"
)

func Test{{.FormattedName}}(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "{{.FormattedName}} Suite")
}

var agoutiDriver *agouti.WebDriver

var _ = BeforeSuite(func() {
	// Choose a WebDriver:

	agoutiDriver = agouti.PhantomJS()
	// agoutiDriver = agouti.Selenium()
	// agoutiDriver = agouti.ChromeDriver()

	Expect(agoutiDriver.Start()).To(Succeed())
})

var _ = AfterSuite(func() {
	Expect(agoutiDriver.Stop()).To(Succeed())
})
`
|
||||||
|
|
||||||
|
// bootstrapData carries the values substituted into the bootstrap templates.
type bootstrapData struct {
	Package       string // Go package name used for the <pkg>_test package clause
	FormattedName string // prettified CamelCase suite name (see prettifyPackageName)
	GinkgoImport  string // import line for ginkgo (dot or plain, per -nodot)
	GomegaImport  string // import line for gomega (dot or plain, per -nodot)
}
|
||||||
|
|
||||||
|
func getPackageAndFormattedName() (string, string, string) {
|
||||||
|
path, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Could not get current working directory: \n" + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
|
||||||
|
dirName = strings.Replace(dirName, " ", "_", -1)
|
||||||
|
|
||||||
|
pkg, err := build.ImportDir(path, 0)
|
||||||
|
packageName := pkg.Name
|
||||||
|
if err != nil {
|
||||||
|
packageName = dirName
|
||||||
|
}
|
||||||
|
|
||||||
|
formattedName := prettifyPackageName(filepath.Base(path))
|
||||||
|
return packageName, dirName, formattedName
|
||||||
|
}
|
||||||
|
|
||||||
|
// prettifyPackageName converts a directory name such as "my-cool_package"
// into a CamelCase suite name such as "MyCoolPackage": separators become
// spaces, words are title-cased, then the spaces are removed.
func prettifyPackageName(name string) string {
	spaced := strings.NewReplacer("-", " ", "_", " ").Replace(name)
	titled := strings.Title(spaced)
	return strings.Replace(titled, " ", "", -1)
}
|
||||||
|
|
||||||
|
// fileExists reports whether path can be stat'ed (any stat success counts,
// including directories).
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
|
||||||
|
|
||||||
|
// generateBootstrap writes a <prefix>_suite_test.go bootstrap file for the
// current package and go-fmts it.  It exits with status 1 if the target file
// already exists.  When noDot is set, plain (non-dot) imports are used and
// nodot forwarding declarations are injected; when agouti is set, the Agouti
// template is used instead of the standard one.
func generateBootstrap(agouti bool, noDot bool) {
	packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
	data := bootstrapData{
		Package:       packageName,
		FormattedName: formattedName,
		GinkgoImport:  `. "github.com/onsi/ginkgo"`,
		GomegaImport:  `. "github.com/onsi/gomega"`,
	}

	if noDot {
		// Plain imports; nodot.ApplyNoDot (below) adds the declarations that
		// make dot-free usage work.
		data.GinkgoImport = `"github.com/onsi/ginkgo"`
		data.GomegaImport = `"github.com/onsi/gomega"`
	}

	targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
	if fileExists(targetFile) {
		fmt.Printf("%s already exists.\n\n", targetFile)
		os.Exit(1)
	} else {
		fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
	}

	f, err := os.Create(targetFile)
	if err != nil {
		complainAndQuit("Could not create file: " + err.Error())
		// NOTE(review): presumably unreachable — complainAndQuit appears to
		// terminate the process; confirm before relying on this panic.
		panic(err.Error())
	}
	defer f.Close()

	var templateText string
	if agouti {
		templateText = agoutiBootstrapText
	} else {
		templateText = bootstrapText
	}

	bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
	if err != nil {
		panic(err.Error())
	}

	buf := &bytes.Buffer{}
	bootstrapTemplate.Execute(buf, data)

	if noDot {
		// Rewrite the rendered file to declare ginkgo/gomega names locally.
		contents, err := nodot.ApplyNoDot(buf.Bytes())
		if err != nil {
			complainAndQuit("Failed to import nodot declarations: " + err.Error())
		}
		fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
		buf = bytes.NewBuffer(contents)
	}

	buf.WriteTo(f)

	goFmt(targetFile)
}
|
|
@ -0,0 +1,68 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildBuildCommand() *Command {
|
||||||
|
commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
|
||||||
|
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||||
|
builder := &SpecBuilder{
|
||||||
|
commandFlags: commandFlags,
|
||||||
|
interruptHandler: interruptHandler,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Command{
|
||||||
|
Name: "build",
|
||||||
|
FlagSet: commandFlags.FlagSet,
|
||||||
|
UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
|
||||||
|
Usage: []string{
|
||||||
|
"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||||
|
"Accepts the following flags:",
|
||||||
|
},
|
||||||
|
Command: builder.BuildSpecs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpecBuilder compiles test suites on behalf of the `ginkgo build` command.
type SpecBuilder struct {
	commandFlags     *RunWatchAndBuildCommandFlags      // shared run/watch/build flag set
	interruptHandler *interrupthandler.InterruptHandler // installed at construction; not consulted in BuildSpecs below
}
|
||||||
|
|
||||||
|
func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
|
||||||
|
r.commandFlags.computeNodes()
|
||||||
|
|
||||||
|
suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
|
||||||
|
|
||||||
|
if len(suites) == 0 {
|
||||||
|
complainAndQuit("Found no test suites")
|
||||||
|
}
|
||||||
|
|
||||||
|
passed := true
|
||||||
|
for _, suite := range suites {
|
||||||
|
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
|
||||||
|
fmt.Printf("Compiling %s...\n", suite.PackageName)
|
||||||
|
|
||||||
|
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
|
||||||
|
err := runner.CompileTo(path)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
passed = false
|
||||||
|
} else {
|
||||||
|
fmt.Printf(" compiled %s.test\n", suite.PackageName)
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.CleanUp()
|
||||||
|
}
|
||||||
|
|
||||||
|
if passed {
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
|
@ -0,0 +1,123 @@
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
	"fmt"
	"go/ast"
	"go/token"
	"strings"
	"unicode"
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Creates a func init() node
|
||||||
|
*/
|
||||||
|
func createVarUnderscoreBlock() *ast.ValueSpec {
|
||||||
|
valueSpec := &ast.ValueSpec{}
|
||||||
|
object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
|
||||||
|
ident := &ast.Ident{Name: "_", Obj: object}
|
||||||
|
valueSpec.Names = append(valueSpec.Names, ident)
|
||||||
|
return valueSpec
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Creates a Describe("Testing with ginkgo", func() { }) node
|
||||||
|
*/
|
||||||
|
func createDescribeBlock() *ast.CallExpr {
|
||||||
|
blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
|
||||||
|
|
||||||
|
fieldList := &ast.FieldList{}
|
||||||
|
funcType := &ast.FuncType{Params: fieldList}
|
||||||
|
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
|
||||||
|
basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
|
||||||
|
describeIdent := &ast.Ident{Name: "Describe"}
|
||||||
|
return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Convenience function to return the name of the *testing.T param
|
||||||
|
* for a Test function that will be rewritten. This is useful because
|
||||||
|
* we will want to replace the usage of this named *testing.T inside the
|
||||||
|
* body of the function with a GinktoT.
|
||||||
|
*/
|
||||||
|
func namedTestingTArg(node *ast.FuncDecl) string {
|
||||||
|
return node.Type.Params.List[0].Names[0].Name // *exhale*
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Convenience function to return the block statement node for a Describe statement
|
||||||
|
*/
|
||||||
|
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
|
||||||
|
var funcLit *ast.FuncLit
|
||||||
|
var found = false
|
||||||
|
|
||||||
|
for _, node := range desc.Args {
|
||||||
|
switch node := node.(type) {
|
||||||
|
case *ast.FuncLit:
|
||||||
|
found = true
|
||||||
|
funcLit = node
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
|
||||||
|
}
|
||||||
|
|
||||||
|
return funcLit.Body
|
||||||
|
}
|
||||||
|
|
||||||
|
/* convenience function for creating an It("TestNameHere")
|
||||||
|
* with all the body of the test function inside the anonymous
|
||||||
|
* func passed to It()
|
||||||
|
*/
|
||||||
|
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
|
||||||
|
blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
|
||||||
|
fieldList := &ast.FieldList{}
|
||||||
|
funcType := &ast.FuncType{Params: fieldList}
|
||||||
|
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
|
||||||
|
|
||||||
|
testName := rewriteTestName(testFunc.Name.Name)
|
||||||
|
basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
|
||||||
|
itBlockIdent := &ast.Ident{Name: "It"}
|
||||||
|
callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
|
||||||
|
return &ast.ExprStmt{X: callExpr}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Rewrites a Go test name into a human-readable spec description,
 * eg: "TestSomethingAmazing" becomes "something amazing".
 * Names that do not start with "Test" are returned unchanged.
 */
func rewriteTestName(testName string) string {
	if !strings.HasPrefix(testName, "Test") {
		return testName
	}

	trimmed := strings.TrimPrefix(testName, "Test")
	if trimmed == "" {
		// A bare "Test" has no describable remainder; guard against the
		// out-of-range index the original code would have hit here.
		return ""
	}

	// Lowercase the leading byte (matching the original's byte-wise cast),
	// then split the CamelCase remainder into space-separated lowercase words.
	trimmed = string(unicode.ToLower(rune(trimmed[0]))) + trimmed[1:]

	nameComponents := []string{}
	currentString := ""
	for _, r := range trimmed { // `r` — the original shadowed the builtin `rune`
		if unicode.IsUpper(r) {
			nameComponents = append(nameComponents, currentString)
			currentString = string(unicode.ToLower(r))
		} else {
			currentString += string(r)
		}
	}

	return strings.Join(append(nameComponents, currentString), " ")
}
|
||||||
|
|
||||||
|
func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
|
||||||
|
return &ast.CallExpr{
|
||||||
|
Lparen: ident.NamePos + 1,
|
||||||
|
Rparen: ident.NamePos + 2,
|
||||||
|
Fun: &ast.Ident{Name: "GinkgoT"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newGinkgoTInterface returns an identifier node naming the GinkgoTInterface
// type.
func newGinkgoTInterface() *ast.Ident {
	return ast.NewIdent("GinkgoTInterface")
}
|
|
@ -0,0 +1,91 @@
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
	"errors"
	"fmt"
	"go/ast"
	"go/token"
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given the root node of an AST, returns the node containing the
|
||||||
|
* import statements for the file.
|
||||||
|
*/
|
||||||
|
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
|
||||||
|
for _, declaration := range rootNode.Decls {
|
||||||
|
decl, ok := declaration.(*ast.GenDecl)
|
||||||
|
if !ok || len(decl.Specs) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok = decl.Specs[0].(*ast.ImportSpec)
|
||||||
|
if ok {
|
||||||
|
imports = decl
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Removes "testing" import, if present
|
||||||
|
*/
|
||||||
|
func removeTestingImport(rootNode *ast.File) {
|
||||||
|
importDecl, err := importsForRootNode(rootNode)
|
||||||
|
if err != nil {
|
||||||
|
panic(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
var index int
|
||||||
|
for i, importSpec := range importDecl.Specs {
|
||||||
|
importSpec := importSpec.(*ast.ImportSpec)
|
||||||
|
if importSpec.Path.Value == "\"testing\"" {
|
||||||
|
index = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Adds import statements for onsi/ginkgo, if missing
|
||||||
|
*/
|
||||||
|
func addGinkgoImports(rootNode *ast.File) {
|
||||||
|
importDecl, err := importsForRootNode(rootNode)
|
||||||
|
if err != nil {
|
||||||
|
panic(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(importDecl.Specs) == 0 {
|
||||||
|
// TODO: might need to create a import decl here
|
||||||
|
panic("unimplemented : expected to find an imports block")
|
||||||
|
}
|
||||||
|
|
||||||
|
needsGinkgo := true
|
||||||
|
for _, importSpec := range importDecl.Specs {
|
||||||
|
importSpec, ok := importSpec.(*ast.ImportSpec)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
|
||||||
|
needsGinkgo = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if needsGinkgo {
|
||||||
|
importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* convenience function to create an import statement
|
||||||
|
*/
|
||||||
|
func createImport(name, path string) *ast.ImportSpec {
|
||||||
|
return &ast.ImportSpec{
|
||||||
|
Name: &ast.Ident{Name: name},
|
||||||
|
Path: &ast.BasicLit{Kind: 9, Value: path},
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,127 @@
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/build"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* RewritePackage takes a name (eg: my-package/tools), finds its test files using
|
||||||
|
* Go's build package, and then rewrites them. A ginkgo test suite file will
|
||||||
|
* also be added for this package, and all of its child packages.
|
||||||
|
*/
|
||||||
|
func RewritePackage(packageName string) {
|
||||||
|
pkg, err := packageWithName(packageName)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, filename := range findTestsInPackage(pkg) {
|
||||||
|
rewriteTestsInFile(filename)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given a package, findTestsInPackage reads the test files in the directory,
|
||||||
|
* and then recurses on each child package, returning a slice of all test files
|
||||||
|
* found in this process.
|
||||||
|
*/
|
||||||
|
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
|
||||||
|
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
|
||||||
|
testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
|
||||||
|
}
|
||||||
|
|
||||||
|
dirFiles, err := ioutil.ReadDir(pkg.Dir)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
re := regexp.MustCompile(`^[._]`)
|
||||||
|
|
||||||
|
for _, file := range dirFiles {
|
||||||
|
if !file.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if re.Match([]byte(file.Name())) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
packageName := filepath.Join(pkg.ImportPath, file.Name())
|
||||||
|
subPackage, err := packageWithName(packageName)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
testfiles = append(testfiles, findTestsInPackage(subPackage)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
addGinkgoSuiteForPackage(pkg)
|
||||||
|
goFmtPackage(pkg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Shells out to `ginkgo bootstrap` inside the package directory to create a
 * test suite file, unless one already exists.  The original working directory
 * is restored before returning.  (Local `suite_test_file` renamed to the
 * Go-conventional `suiteTestFile`.)
 */
func addGinkgoSuiteForPackage(pkg *build.Package) {
	originalDir, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	suiteTestFile := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")

	if _, err = os.Stat(suiteTestFile); err == nil {
		return // test file already exists, this should be a no-op
	}

	if err = os.Chdir(pkg.Dir); err != nil {
		panic(err)
	}

	output, err := exec.Command("ginkgo", "bootstrap").Output()

	if err != nil {
		panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
	}

	if err = os.Chdir(originalDir); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
/*
 * Shells out to `go fmt` to format the package.  Formatting failures are
 * reported as warnings rather than aborting the conversion.
 */
func goFmtPackage(pkg *build.Package) {
	cmd := exec.Command("go", "fmt", pkg.ImportPath)
	output, err := cmd.Output()
	if err != nil {
		fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
	}
}
|
||||||
|
|
||||||
|
/*
 * Attempts to return a package with its test files already read.  The first
 * Import reads the buildable go files inside the package, but fails when the
 * package has none; the retry uses build.FindOnly (replacing the raw magic
 * mode value 1) so the directory location is still resolved.
 */
func packageWithName(name string) (pkg *build.Package, err error) {
	pkg, err = build.Default.Import(name, ".", 0)
	if err == nil {
		return
	}

	pkg, err = build.Default.Import(name, ".", build.FindOnly)
	return
}
|
|
@ -0,0 +1,56 @@
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/ast"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given a root node, walks its top level statements and returns
|
||||||
|
* points to function nodes to rewrite as It statements.
|
||||||
|
* These functions, according to Go testing convention, must be named
|
||||||
|
* TestWithCamelCasedName and receive a single *testing.T argument.
|
||||||
|
*/
|
||||||
|
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
|
||||||
|
testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
|
||||||
|
|
||||||
|
ast.Inspect(rootNode, func(node ast.Node) bool {
|
||||||
|
if node == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
switch node := node.(type) {
|
||||||
|
case *ast.FuncDecl:
|
||||||
|
matches := testNameRegexp.MatchString(node.Name.Name)
|
||||||
|
|
||||||
|
if matches && receivesTestingT(node) {
|
||||||
|
testsToRewrite = append(testsToRewrite, node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* convenience function that looks at args to a function and determines if its
|
||||||
|
* params include an argument of type *testing.T
|
||||||
|
*/
|
||||||
|
func receivesTestingT(node *ast.FuncDecl) bool {
|
||||||
|
if len(node.Type.Params.List) != 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
intermediate := base.X.(*ast.SelectorExpr)
|
||||||
|
isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
|
||||||
|
isTestingT := intermediate.Sel.Name == "T"
|
||||||
|
|
||||||
|
return isTestingPackage && isTestingT
|
||||||
|
}
|
163
vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
generated
vendored
Normal file
163
vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
generated
vendored
Normal file
|
@ -0,0 +1,163 @@
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/format"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given a file path, rewrites any tests in the Ginkgo format.
|
||||||
|
* First, we parse the AST, and update the imports declaration.
|
||||||
|
* Then, we walk the first child elements in the file, returning tests to rewrite.
|
||||||
|
* A top level init func is declared, with a single Describe func inside.
|
||||||
|
* Then the test functions to rewrite are inserted as It statements inside the Describe.
|
||||||
|
* Finally we walk the rest of the file, replacing other usages of *testing.T
|
||||||
|
* Once that is complete, we write the AST back out again to its file.
|
||||||
|
*/
|
||||||
|
func rewriteTestsInFile(pathToFile string) {
|
||||||
|
fileSet := token.NewFileSet()
|
||||||
|
rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
addGinkgoImports(rootNode)
|
||||||
|
removeTestingImport(rootNode)
|
||||||
|
|
||||||
|
varUnderscoreBlock := createVarUnderscoreBlock()
|
||||||
|
describeBlock := createDescribeBlock()
|
||||||
|
varUnderscoreBlock.Values = []ast.Expr{describeBlock}
|
||||||
|
|
||||||
|
for _, testFunc := range findTestFuncs(rootNode) {
|
||||||
|
rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
underscoreDecl := &ast.GenDecl{
|
||||||
|
Tok: 85, // gah, magick numbers are needed to make this work
|
||||||
|
TokPos: 14, // this tricks Go into writing "var _ = Describe"
|
||||||
|
Specs: []ast.Spec{varUnderscoreBlock},
|
||||||
|
}
|
||||||
|
|
||||||
|
imports := rootNode.Decls[0]
|
||||||
|
tail := rootNode.Decls[1:]
|
||||||
|
rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
|
||||||
|
rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
|
||||||
|
walkNodesInRootNodeReplacingTestingT(rootNode)
|
||||||
|
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
if err = format.Node(&buffer, fileSet, rootNode); err != nil {
|
||||||
|
panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fileInfo, err := os.Stat(pathToFile)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
|
||||||
|
}
|
||||||
|
|
||||||
|
ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given a test func named TestDoesSomethingNeat, rewrites it as
|
||||||
|
* It("does something neat", func() { __test_body_here__ }) and adds it
|
||||||
|
* to the Describe's list of statements
|
||||||
|
*/
|
||||||
|
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
|
||||||
|
var funcIndex int = -1
|
||||||
|
for index, child := range rootNode.Decls {
|
||||||
|
if child == testFunc {
|
||||||
|
funcIndex = index
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if funcIndex < 0 {
|
||||||
|
panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
var block *ast.BlockStmt = blockStatementFromDescribe(describe)
|
||||||
|
block.List = append(block.List, createItStatementForTestFunc(testFunc))
|
||||||
|
replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
|
||||||
|
|
||||||
|
// remove the old test func from the root node's declarations
|
||||||
|
rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* walks nodes inside of a test func's statements and replaces the usage of
|
||||||
|
* it's named *testing.T param with GinkgoT's
|
||||||
|
*/
|
||||||
|
func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
|
||||||
|
ast.Inspect(statementsBlock, func(node ast.Node) bool {
|
||||||
|
if node == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
keyValueExpr, ok := node.(*ast.KeyValueExpr)
|
||||||
|
if ok {
|
||||||
|
replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
funcLiteral, ok := node.(*ast.FuncLit)
|
||||||
|
if ok {
|
||||||
|
replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
callExpr, ok := node.(*ast.CallExpr)
|
||||||
|
if !ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
replaceTestingTsInArgsLists(callExpr, testingT)
|
||||||
|
|
||||||
|
funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
|
||||||
|
if ok {
|
||||||
|
replaceTestingTsMethodCalls(funCall, testingT)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* rewrite t.Fail() or any other *testing.T method by replacing with T().Fail()
|
||||||
|
* This function receives a selector expression (eg: t.Fail()) and
|
||||||
|
* the name of the *testing.T param from the function declaration. Rewrites the
|
||||||
|
* selector expression in place if the target was a *testing.T
|
||||||
|
*/
|
||||||
|
func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
|
||||||
|
ident, ok := selectorExpr.X.(*ast.Ident)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ident.Name == testingT {
|
||||||
|
selectorExpr.X = newGinkgoTFromIdent(ident)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* replaces usages of a named *testing.T param inside of a call expression
|
||||||
|
* with a new GinkgoT object
|
||||||
|
*/
|
||||||
|
func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
|
||||||
|
for index, arg := range callExpr.Args {
|
||||||
|
ident, ok := arg.(*ast.Ident)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ident.Name == testingT {
|
||||||
|
callExpr.Args[index] = newGinkgoTFromIdent(ident)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
130
vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
generated
vendored
Normal file
130
vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
generated
vendored
Normal file
|
@ -0,0 +1,130 @@
|
||||||
|
package convert
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/ast"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Rewrites any other top level funcs that receive a *testing.T param
|
||||||
|
*/
|
||||||
|
func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
|
||||||
|
for _, decl := range declarations {
|
||||||
|
decl, ok := decl.(*ast.FuncDecl)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, param := range decl.Type.Params.List {
|
||||||
|
starExpr, ok := param.Type.(*ast.StarExpr)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
xIdent, ok := selectorExpr.X.(*ast.Ident)
|
||||||
|
if !ok || xIdent.Name != "testing" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if selectorExpr.Sel.Name != "T" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
param.Type = newGinkgoTInterface()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Walks all of the nodes in the file, replacing *testing.T in struct
|
||||||
|
* and func literal nodes. eg:
|
||||||
|
* type foo struct { *testing.T }
|
||||||
|
* var bar = func(t *testing.T) { }
|
||||||
|
*/
|
||||||
|
func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
|
||||||
|
ast.Inspect(rootNode, func(node ast.Node) bool {
|
||||||
|
if node == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
switch node := node.(type) {
|
||||||
|
case *ast.StructType:
|
||||||
|
replaceTestingTsInStructType(node)
|
||||||
|
case *ast.FuncLit:
|
||||||
|
replaceTypeDeclTestingTsInFuncLiteral(node)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* replaces named *testing.T inside a composite literal
|
||||||
|
*/
|
||||||
|
func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
|
||||||
|
ident, ok := kve.Value.(*ast.Ident)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ident.Name == testingT {
|
||||||
|
kve.Value = newGinkgoTFromIdent(ident)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* replaces *testing.T params in a func literal with GinkgoT
|
||||||
|
*/
|
||||||
|
func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
|
||||||
|
for _, arg := range functionLiteral.Type.Params.List {
|
||||||
|
starExpr, ok := arg.Type.(*ast.StarExpr)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
target, ok := selectorExpr.X.(*ast.Ident)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
|
||||||
|
arg.Type = newGinkgoTInterface()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Replaces *testing.T types inside of a struct declaration with a GinkgoT
|
||||||
|
* eg: type foo struct { *testing.T }
|
||||||
|
*/
|
||||||
|
func replaceTestingTsInStructType(structType *ast.StructType) {
|
||||||
|
for _, field := range structType.Fields.List {
|
||||||
|
starExpr, ok := field.Type.(*ast.StarExpr)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
xIdent, ok := selectorExpr.X.(*ast.Ident)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
|
||||||
|
field.Type = newGinkgoTInterface()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,44 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/convert"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildConvertCommand() *Command {
|
||||||
|
return &Command{
|
||||||
|
Name: "convert",
|
||||||
|
FlagSet: flag.NewFlagSet("convert", flag.ExitOnError),
|
||||||
|
UsageCommand: "ginkgo convert /path/to/package",
|
||||||
|
Usage: []string{
|
||||||
|
"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
|
||||||
|
},
|
||||||
|
Command: convertPackage,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertPackage(args []string, additionalArgs []string) {
|
||||||
|
if len(args) != 1 {
|
||||||
|
println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := recover()
|
||||||
|
if err != nil {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case error:
|
||||||
|
println(err.Error())
|
||||||
|
case string:
|
||||||
|
println(err)
|
||||||
|
default:
|
||||||
|
println(fmt.Sprintf("unexpected error: %#v", err))
|
||||||
|
}
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
convert.RewritePackage(args[0])
|
||||||
|
}
|
|
@ -0,0 +1,164 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildGenerateCommand() *Command {
|
||||||
|
var agouti, noDot bool
|
||||||
|
flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
|
||||||
|
flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
|
||||||
|
flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
|
||||||
|
|
||||||
|
return &Command{
|
||||||
|
Name: "generate",
|
||||||
|
FlagSet: flagSet,
|
||||||
|
UsageCommand: "ginkgo generate <filename(s)>",
|
||||||
|
Usage: []string{
|
||||||
|
"Generate a test file named filename_test.go",
|
||||||
|
"If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
|
||||||
|
"Accepts the following flags:",
|
||||||
|
},
|
||||||
|
Command: func(args []string, additionalArgs []string) {
|
||||||
|
generateSpec(args, agouti, noDot)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// specText is the template for a standard generated _test.go file;
// placeholders are filled from a specData value.
var specText = `package {{.Package}}_test

import (
	. "{{.PackageImportPath}}"

	{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
	{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
)

var _ = Describe("{{.Subject}}", func() {

})
`

// agoutiSpecText is the template used when the -agouti flag is passed;
// it adds agouti page setup/teardown around the generated Describe.
var agoutiSpecText = `package {{.Package}}_test

import (
	. "{{.PackageImportPath}}"

	{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
	{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
	. "github.com/sclevine/agouti/matchers"

	"github.com/sclevine/agouti"
)

var _ = Describe("{{.Subject}}", func() {
	var page *agouti.Page

	BeforeEach(func() {
		var err error
		page, err = agoutiDriver.NewPage()
		Expect(err).NotTo(HaveOccurred())
	})

	AfterEach(func() {
		Expect(page.Destroy()).To(Succeed())
	})
})
`
|
||||||
|
|
||||||
|
// specData carries the values substituted into specText / agoutiSpecText.
type specData struct {
	Package           string // name of the package under test
	Subject           string // human-readable Describe subject
	PackageImportPath string // import path of the package under test
	IncludeImports    bool   // when true, dot-import ginkgo and gomega
}
|
||||||
|
|
||||||
|
func generateSpec(args []string, agouti, noDot bool) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
err := generateSpecForSubject("", agouti, noDot)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
fmt.Println("")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Println("")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var failed bool
|
||||||
|
for _, arg := range args {
|
||||||
|
err := generateSpecForSubject(arg, agouti, noDot)
|
||||||
|
if err != nil {
|
||||||
|
failed = true
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Println("")
|
||||||
|
if failed {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateSpecForSubject(subject string, agouti, noDot bool) error {
|
||||||
|
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
|
||||||
|
if subject != "" {
|
||||||
|
subject = strings.Split(subject, ".go")[0]
|
||||||
|
subject = strings.Split(subject, "_test")[0]
|
||||||
|
specFilePrefix = subject
|
||||||
|
formattedName = prettifyPackageName(subject)
|
||||||
|
}
|
||||||
|
|
||||||
|
data := specData{
|
||||||
|
Package: packageName,
|
||||||
|
Subject: formattedName,
|
||||||
|
PackageImportPath: getPackageImportPath(),
|
||||||
|
IncludeImports: !noDot,
|
||||||
|
}
|
||||||
|
|
||||||
|
targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
|
||||||
|
if fileExists(targetFile) {
|
||||||
|
return fmt.Errorf("%s already exists.", targetFile)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Create(targetFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
var templateText string
|
||||||
|
if agouti {
|
||||||
|
templateText = agoutiSpecText
|
||||||
|
} else {
|
||||||
|
templateText = specText
|
||||||
|
}
|
||||||
|
|
||||||
|
specTemplate, err := template.New("spec").Parse(templateText)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
specTemplate.Execute(f, data)
|
||||||
|
goFmt(targetFile)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPackageImportPath derives the import path of the current working
// directory by taking everything after the ".../src/" segment of a
// GOPATH-style layout; falls back to a placeholder with a warning.
func getPackageImportPath() string {
	workingDir, err := os.Getwd()
	if err != nil {
		panic(err.Error())
	}

	sep := string(filepath.Separator)
	segments := strings.Split(workingDir, sep+"src"+sep)
	if len(segments) == 1 {
		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
		return "UNKNOWN_PACKAGE_PATH"
	}
	return filepath.ToSlash(segments[len(segments)-1])
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildHelpCommand() *Command {
|
||||||
|
return &Command{
|
||||||
|
Name: "help",
|
||||||
|
FlagSet: flag.NewFlagSet("help", flag.ExitOnError),
|
||||||
|
UsageCommand: "ginkgo help <COMAND>",
|
||||||
|
Usage: []string{
|
||||||
|
"Print usage information. If a command is passed in, print usage information just for that command.",
|
||||||
|
},
|
||||||
|
Command: printHelp,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func printHelp(args []string, additionalArgs []string) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
usage()
|
||||||
|
} else {
|
||||||
|
command, found := commandMatching(args[0])
|
||||||
|
if !found {
|
||||||
|
complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
|
||||||
|
}
|
||||||
|
|
||||||
|
usageForCommand(command, true)
|
||||||
|
}
|
||||||
|
}
|
52
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
generated
vendored
Normal file
52
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
package interrupthandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InterruptHandler tracks SIGINT/SIGTERM deliveries for the CLI.
type InterruptHandler struct {
	interruptCount int         // number of interrupt signals received so far
	lock           *sync.Mutex // guards interruptCount
	C              chan bool   // closed on the first interrupt (broadcast)
}
|
||||||
|
|
||||||
|
func NewInterruptHandler() *InterruptHandler {
|
||||||
|
h := &InterruptHandler{
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
C: make(chan bool, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
go h.handleInterrupt()
|
||||||
|
SwallowSigQuit()
|
||||||
|
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *InterruptHandler) WasInterrupted() bool {
|
||||||
|
h.lock.Lock()
|
||||||
|
defer h.lock.Unlock()
|
||||||
|
|
||||||
|
return h.interruptCount > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleInterrupt blocks until one SIGINT/SIGTERM arrives, records it, and
// re-arms itself in a fresh goroutine. The first interrupt closes h.C to
// broadcast cancellation; more than five interrupts force an immediate exit.
func (h *InterruptHandler) handleInterrupt() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	<-c
	// Unregister so the next handleInterrupt goroutine gets its own channel.
	signal.Stop(c)

	h.lock.Lock()
	h.interruptCount++
	if h.interruptCount == 1 {
		close(h.C) // broadcast: every receiver on C unblocks
	} else if h.interruptCount > 5 {
		os.Exit(1) // the user really wants out — stop waiting for cleanup
	}
	h.lock.Unlock()

	go h.handleInterrupt()
}
|
14
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
generated
vendored
Normal file
14
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin linux
|
||||||
|
|
||||||
|
package interrupthandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SwallowSigQuit registers a buffered channel for SIGQUIT so the runtime's
// default behavior (goroutine dump + exit) is suppressed. The channel is
// deliberately never read; the buffer just absorbs the signals.
func SwallowSigQuit() {
	c := make(chan os.Signal, 1024)
	signal.Notify(c, syscall.SIGQUIT)
}
|
7
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
generated
vendored
Normal file
7
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package interrupthandler
|
||||||
|
|
||||||
|
// SwallowSigQuit is a no-op on Windows, which has no SIGQUIT to swallow.
func SwallowSigQuit() {
	//noop
}
|
|
@ -0,0 +1,291 @@
|
||||||
|
/*
|
||||||
|
The Ginkgo CLI
|
||||||
|
|
||||||
|
The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||||
|
|
||||||
|
You can also learn more by running:
|
||||||
|
|
||||||
|
ginkgo help
|
||||||
|
|
||||||
|
Here are some of the more commonly used commands:
|
||||||
|
|
||||||
|
To install:
|
||||||
|
|
||||||
|
go install github.com/onsi/ginkgo/ginkgo
|
||||||
|
|
||||||
|
To run tests:
|
||||||
|
|
||||||
|
ginkgo
|
||||||
|
|
||||||
|
To run tests in all subdirectories:
|
||||||
|
|
||||||
|
ginkgo -r
|
||||||
|
|
||||||
|
To run tests in particular packages:
|
||||||
|
|
||||||
|
ginkgo <flags> /path/to/package /path/to/another/package
|
||||||
|
|
||||||
|
To pass arguments/flags to your tests:
|
||||||
|
|
||||||
|
ginkgo <flags> <packages> -- <pass-throughs>
|
||||||
|
|
||||||
|
To run tests in parallel
|
||||||
|
|
||||||
|
ginkgo -p
|
||||||
|
|
||||||
|
this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with:
|
||||||
|
|
||||||
|
ginkgo -nodes=N
|
||||||
|
|
||||||
|
(note that you don't need to provide -p in this case).
|
||||||
|
|
||||||
|
By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
|
||||||
|
An alternative is to have the parallel nodes run and stream interleaved output back. This is useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output:
|
||||||
|
|
||||||
|
ginkgo -nodes=N -stream=true
|
||||||
|
|
||||||
|
On windows, the default value for stream is true.
|
||||||
|
|
||||||
|
By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can:
|
||||||
|
|
||||||
|
ginkgo -keepGoing
|
||||||
|
|
||||||
|
To monitor packages and rerun tests when changes occur:
|
||||||
|
|
||||||
|
ginkgo watch <-r> </path/to/package>
|
||||||
|
|
||||||
|
passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
|
||||||
|
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
|
||||||
|
that depend on X are not rerun.
|
||||||
|
|
||||||
|
[OSX & Linux only] To receive (desktop) notifications when a test run completes:
|
||||||
|
|
||||||
|
ginkgo -notify
|
||||||
|
|
||||||
|
this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
|
||||||
|
|
||||||
|
Sometimes (to suss out race conditions/flaky tests, for example) you want to keep running a test suite until it fails. You can do this with:
|
||||||
|
|
||||||
|
ginkgo -untilItFails
|
||||||
|
|
||||||
|
To bootstrap a test suite:
|
||||||
|
|
||||||
|
ginkgo bootstrap
|
||||||
|
|
||||||
|
To generate a test file:
|
||||||
|
|
||||||
|
ginkgo generate <test_file_name>
|
||||||
|
|
||||||
|
To bootstrap/generate test files without using "." imports:
|
||||||
|
|
||||||
|
ginkgo bootstrap --nodot
|
||||||
|
ginkgo generate --nodot
|
||||||
|
|
||||||
|
this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run
|
||||||
|
|
||||||
|
ginkgo nodot
|
||||||
|
|
||||||
|
to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added.
|
||||||
|
|
||||||
|
To convert an existing XUnit style test suite to a Ginkgo-style test suite:
|
||||||
|
|
||||||
|
ginkgo convert .
|
||||||
|
|
||||||
|
To unfocus tests:
|
||||||
|
|
||||||
|
ginkgo unfocus
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
ginkgo blur
|
||||||
|
|
||||||
|
To compile a test suite:
|
||||||
|
|
||||||
|
ginkgo build <path-to-package>
|
||||||
|
|
||||||
|
will output an executable file named `package.test`. This can be run directly or by invoking
|
||||||
|
|
||||||
|
ginkgo <path-to-package.test>
|
||||||
|
|
||||||
|
To print out Ginkgo's version:
|
||||||
|
|
||||||
|
ginkgo version
|
||||||
|
|
||||||
|
To get more help:
|
||||||
|
|
||||||
|
ginkgo help
|
||||||
|
*/
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||||
|
)
|
||||||
|
|
||||||
|
const greenColor = "\x1b[32m"
|
||||||
|
const redColor = "\x1b[91m"
|
||||||
|
const defaultStyle = "\x1b[0m"
|
||||||
|
const lightGrayColor = "\x1b[37m"
|
||||||
|
|
||||||
|
// Command describes one ginkgo CLI subcommand.
type Command struct {
	Name                      string // primary subcommand name
	AltName                   string // optional alternate name accepted by Matches
	FlagSet                   *flag.FlagSet
	Usage                     []string // help-text lines printed by usageForCommand
	UsageCommand              string   // one-line invocation synopsis
	Command                   func(args []string, additionalArgs []string)
	SuppressFlagDocumentation bool     // hide FlagSet defaults in short-form help
	FlagDocSubstitute         []string // shown instead when documentation is suppressed
}
|
||||||
|
|
||||||
|
func (c *Command) Matches(name string) bool {
|
||||||
|
return c.Name == name || (c.AltName != "" && c.AltName == name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Command) Run(args []string, additionalArgs []string) {
|
||||||
|
c.FlagSet.Parse(args)
|
||||||
|
c.Command(c.FlagSet.Args(), additionalArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultCommand *Command
|
||||||
|
var Commands []*Command
|
||||||
|
|
||||||
|
// init wires up the default command (plain "ginkgo" runs tests) and the
// registry of named subcommands consulted by main and help. The append
// order here is the order commands appear in the usage listing.
func init() {
	DefaultCommand = BuildRunCommand()
	Commands = append(Commands, BuildWatchCommand())
	Commands = append(Commands, BuildBuildCommand())
	Commands = append(Commands, BuildBootstrapCommand())
	Commands = append(Commands, BuildGenerateCommand())
	Commands = append(Commands, BuildNodotCommand())
	Commands = append(Commands, BuildConvertCommand())
	Commands = append(Commands, BuildUnfocusCommand())
	Commands = append(Commands, BuildVersionCommand())
	Commands = append(Commands, BuildHelpCommand())
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
args := []string{}
|
||||||
|
additionalArgs := []string{}
|
||||||
|
|
||||||
|
foundDelimiter := false
|
||||||
|
|
||||||
|
for _, arg := range os.Args[1:] {
|
||||||
|
if !foundDelimiter {
|
||||||
|
if arg == "--" {
|
||||||
|
foundDelimiter = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if foundDelimiter {
|
||||||
|
additionalArgs = append(additionalArgs, arg)
|
||||||
|
} else {
|
||||||
|
args = append(args, arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
commandToRun, found := commandMatching(args[0])
|
||||||
|
if found {
|
||||||
|
commandToRun.Run(args[1:], additionalArgs)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
DefaultCommand.Run(args, additionalArgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func commandMatching(name string) (*Command, bool) {
|
||||||
|
for _, command := range Commands {
|
||||||
|
if command.Matches(name) {
|
||||||
|
return command, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func usage() {
|
||||||
|
fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION)
|
||||||
|
usageForCommand(DefaultCommand, false)
|
||||||
|
for _, command := range Commands {
|
||||||
|
fmt.Fprintf(os.Stderr, "\n")
|
||||||
|
usageForCommand(command, false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func usageForCommand(command *Command, longForm bool) {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
|
||||||
|
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n"))
|
||||||
|
if command.SuppressFlagDocumentation && !longForm {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n "))
|
||||||
|
} else {
|
||||||
|
command.FlagSet.PrintDefaults()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func complainAndQuit(complaint string) {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func findSuites(args []string, recurse bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
|
||||||
|
suites := []testsuite.TestSuite{}
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
for _, arg := range args {
|
||||||
|
if allowPrecompiled {
|
||||||
|
suite, err := testsuite.PrecompiledTestSuite(arg)
|
||||||
|
if err == nil {
|
||||||
|
suites = append(suites, suite)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
suites = append(suites, testsuite.SuitesInDir(arg, recurse)...)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
suites = testsuite.SuitesInDir(".", recurse)
|
||||||
|
}
|
||||||
|
|
||||||
|
skippedPackages := []string{}
|
||||||
|
if skipPackage != "" {
|
||||||
|
skipFilters := strings.Split(skipPackage, ",")
|
||||||
|
filteredSuites := []testsuite.TestSuite{}
|
||||||
|
for _, suite := range suites {
|
||||||
|
skip := false
|
||||||
|
for _, skipFilter := range skipFilters {
|
||||||
|
if strings.Contains(suite.Path, skipFilter) {
|
||||||
|
skip = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if skip {
|
||||||
|
skippedPackages = append(skippedPackages, suite.Path)
|
||||||
|
} else {
|
||||||
|
filteredSuites = append(filteredSuites, suite)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
suites = filteredSuites
|
||||||
|
}
|
||||||
|
|
||||||
|
return suites, skippedPackages
|
||||||
|
}
|
||||||
|
|
||||||
|
func goFmt(path string) {
|
||||||
|
err := exec.Command("go", "fmt", path).Run()
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Could not fmt: " + err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// pluralizedWord picks the singular form when count is exactly one, and
// the plural form otherwise (including zero).
func pluralizedWord(singular, plural string, count int) string {
	switch count {
	case 1:
		return singular
	default:
		return plural
	}
}
|
|
@ -0,0 +1,194 @@
|
||||||
|
package nodot
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/build"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ApplyNoDot(data []byte) ([]byte, error) {
|
||||||
|
sections, err := generateNodotSections()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, section := range sections {
|
||||||
|
data = section.createOrUpdateIn(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nodotSection describes one "Declarations for X" block managed by the
// nodot command: the package to alias plus the identifiers to surface.
type nodotSection struct {
	name         string   // section label written into the comment header line
	pkg          string   // package qualifier prepended by prefix()
	declarations []string // value identifiers aliased via "var X = pkg.X"
	types        []string // type identifiers aliased via "type X pkg.X"
}
|
||||||
|
|
||||||
|
// createOrUpdateIn rewrites this section inside the given file contents:
// the old section header and its alias lines are stripped (remembering any
// user-chosen renames), then a fresh section is appended at the end of the
// file. Returns the updated contents.
func (s nodotSection) createOrUpdateIn(data []byte) []byte {
	// Maps the fully-qualified source name (e.g. "ginkgo.It") to the local
	// alias the user previously chose, so renames survive regeneration.
	renames := map[string]string{}

	contents := string(data)

	lines := strings.Split(contents, "\n")

	comment := "// Declarations for " + s.name

	newLines := []string{}
	for _, line := range lines {
		// Drop the old section header; it is re-appended below.
		if line == comment {
			continue
		}

		words := strings.Split(line, " ")
		lastWord := words[len(words)-1]

		// Alias lines end in the qualified name ("var X = pkg.X" /
		// "type T pkg.T"); remember the alias (words[1]) and drop the line.
		if s.containsDeclarationOrType(lastWord) {
			renames[lastWord] = words[1]
			continue
		}

		newLines = append(newLines, line)
	}

	// Ensure a blank line separates the file body from the section header.
	if len(newLines[len(newLines)-1]) > 0 {
		newLines = append(newLines, "")
	}

	newLines = append(newLines, comment)

	// Re-emit type aliases, honoring any previously captured rename.
	for _, typ := range s.types {
		name, ok := renames[s.prefix(typ)]
		if !ok {
			name = typ
		}
		newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
	}

	// Re-emit var aliases, honoring any previously captured rename.
	for _, decl := range s.declarations {
		name, ok := renames[s.prefix(decl)]
		if !ok {
			name = decl
		}
		newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
	}

	// Trailing newline at end of file.
	newLines = append(newLines, "")

	newContents := strings.Join(newLines, "\n")

	return []byte(newContents)
}
|
||||||
|
|
||||||
|
func (s nodotSection) prefix(declOrType string) string {
|
||||||
|
return s.pkg + "." + declOrType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s nodotSection) containsDeclarationOrType(word string) bool {
|
||||||
|
for _, declaration := range s.declarations {
|
||||||
|
if s.prefix(declaration) == word {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, typ := range s.types {
|
||||||
|
if s.prefix(typ) == word {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateNodotSections() ([]nodotSection, error) {
|
||||||
|
sections := []nodotSection{}
|
||||||
|
|
||||||
|
declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sections = append(sections, nodotSection{
|
||||||
|
name: "Ginkgo DSL",
|
||||||
|
pkg: "ginkgo",
|
||||||
|
declarations: declarations,
|
||||||
|
types: []string{"Done", "Benchmarker"},
|
||||||
|
})
|
||||||
|
|
||||||
|
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sections = append(sections, nodotSection{
|
||||||
|
name: "Gomega DSL",
|
||||||
|
pkg: "gomega",
|
||||||
|
declarations: declarations,
|
||||||
|
})
|
||||||
|
|
||||||
|
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sections = append(sections, nodotSection{
|
||||||
|
name: "Gomega Matchers",
|
||||||
|
pkg: "gomega",
|
||||||
|
declarations: declarations,
|
||||||
|
})
|
||||||
|
|
||||||
|
return sections, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
|
||||||
|
pkg, err := build.Import(pkgPath, ".", 0)
|
||||||
|
if err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
|
||||||
|
if err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
blacklistLookup := map[string]bool{}
|
||||||
|
for _, declaration := range blacklist {
|
||||||
|
blacklistLookup[declaration] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
filteredDeclarations := []string{}
|
||||||
|
for _, declaration := range declarations {
|
||||||
|
if blacklistLookup[declaration] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
filteredDeclarations = append(filteredDeclarations, declaration)
|
||||||
|
}
|
||||||
|
|
||||||
|
return filteredDeclarations, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getExportedDeclarationsForFile(path string) ([]string, error) {
|
||||||
|
fset := token.NewFileSet()
|
||||||
|
tree, err := parser.ParseFile(fset, path, nil, 0)
|
||||||
|
if err != nil {
|
||||||
|
return []string{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
declarations := []string{}
|
||||||
|
ast.FileExports(tree)
|
||||||
|
for _, decl := range tree.Decls {
|
||||||
|
switch x := decl.(type) {
|
||||||
|
case *ast.GenDecl:
|
||||||
|
switch s := x.Specs[0].(type) {
|
||||||
|
case *ast.ValueSpec:
|
||||||
|
declarations = append(declarations, s.Names[0].Name)
|
||||||
|
}
|
||||||
|
case *ast.FuncDecl:
|
||||||
|
declarations = append(declarations, x.Name.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return declarations, nil
|
||||||
|
}
|
|
@ -0,0 +1,76 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"flag"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/nodot"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BuildNodotCommand() *Command {
|
||||||
|
return &Command{
|
||||||
|
Name: "nodot",
|
||||||
|
FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError),
|
||||||
|
UsageCommand: "ginkgo nodot",
|
||||||
|
Usage: []string{
|
||||||
|
"Update the nodot declarations in your test suite",
|
||||||
|
"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
|
||||||
|
"If you've renamed a declaration, that name will be honored and not overwritten.",
|
||||||
|
},
|
||||||
|
Command: updateNodot,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateNodot(args []string, additionalArgs []string) {
|
||||||
|
suiteFile, perm := findSuiteFile()
|
||||||
|
|
||||||
|
data, err := ioutil.ReadFile(suiteFile)
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Failed to update nodot declarations: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
content, err := nodot.ApplyNoDot(data)
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Failed to update nodot declarations: " + err.Error())
|
||||||
|
}
|
||||||
|
ioutil.WriteFile(suiteFile, content, perm)
|
||||||
|
|
||||||
|
goFmt(suiteFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func findSuiteFile() (string, os.FileMode) {
|
||||||
|
workingDir, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := ioutil.ReadDir(workingDir)
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if file.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
path := filepath.Join(workingDir, file.Name())
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
if re.MatchReader(bufio.NewReader(f)) {
|
||||||
|
return path, file.Mode()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
|
||||||
|
|
||||||
|
return "", 0
|
||||||
|
}
|
|
@ -0,0 +1,141 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Notifier sends desktop notifications and runs the after-suite hook command
// when test suites complete, as configured by the shared run/watch/build
// command flags.
type Notifier struct {
	commandFlags *RunWatchAndBuildCommandFlags
}
|
||||||
|
|
||||||
|
func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
|
||||||
|
return &Notifier{
|
||||||
|
commandFlags: commandFlags,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyNotificationsAreAvailable exits the process (status 1) with install
// instructions when --notify was requested but the platform's notification
// helper binary cannot be found on PATH (terminal-notifier on OS X,
// notify-send on Linux). It is a no-op when --notify is off or on other
// operating systems.
func (n *Notifier) VerifyNotificationsAreAvailable() {
	if n.commandFlags.Notify {
		onLinux := (runtime.GOOS == "linux")
		onOSX := (runtime.GOOS == "darwin")
		if onOSX {

			_, err := exec.LookPath("terminal-notifier")
			if err != nil {
				fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.

OSX:

To remedy this:

brew install terminal-notifier

To learn more about terminal-notifier:

https://github.com/alloy/terminal-notifier
`)
				os.Exit(1)
			}

		} else if onLinux {

			_, err := exec.LookPath("notify-send")
			if err != nil {
				fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.

Linux:

Download and install notify-send for your distribution
`)
				os.Exit(1)
			}

		}
	}
}
|
||||||
|
|
||||||
|
func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
|
||||||
|
if suitePassed {
|
||||||
|
n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
|
||||||
|
} else {
|
||||||
|
n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *Notifier) SendNotification(title string, subtitle string) {
|
||||||
|
|
||||||
|
if n.commandFlags.Notify {
|
||||||
|
onLinux := (runtime.GOOS == "linux")
|
||||||
|
onOSX := (runtime.GOOS == "darwin")
|
||||||
|
|
||||||
|
if onOSX {
|
||||||
|
|
||||||
|
_, err := exec.LookPath("terminal-notifier")
|
||||||
|
if err == nil {
|
||||||
|
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
|
||||||
|
terminal := os.Getenv("TERM_PROGRAM")
|
||||||
|
if terminal == "iTerm.app" {
|
||||||
|
args = append(args, "-activate", "com.googlecode.iterm2")
|
||||||
|
} else if terminal == "Apple_Terminal" {
|
||||||
|
args = append(args, "-activate", "com.apple.Terminal")
|
||||||
|
}
|
||||||
|
|
||||||
|
exec.Command("terminal-notifier", args...).Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if onLinux {
|
||||||
|
|
||||||
|
_, err := exec.LookPath("notify-send")
|
||||||
|
if err == nil {
|
||||||
|
args := []string{"-a", "ginkgo", title, subtitle}
|
||||||
|
exec.Command("notify-send", args...).Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunCommand executes the user-supplied --afterSuiteHook shell command (if
// any) after a suite finishes, substituting the (ginkgo-suite-passed) and
// (ginkgo-suite-name) placeholders, then prints the hook's combined output
// (colored unless --noColor). A failing hook also triggers an error
// notification.
func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {

	command := n.commandFlags.AfterSuiteHook
	if command != "" {

		// Allow for string replacement to pass input to the command
		passed := "[FAIL]"
		if suitePassed {
			passed = "[PASS]"
		}
		command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
		command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)

		// Must break command into parts
		// (single- and double-quoted runs are kept as one argument)
		splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
		parts := splitArgs.FindAllString(command, -1)

		output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
		if err != nil {
			fmt.Println("Post-suite command failed:")
			if config.DefaultReporterConfig.NoColor {
				fmt.Printf("\t%s\n", output)
			} else {
				fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
			}
			n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
		} else {
			fmt.Println("Post-suite command succeeded:")
			if config.DefaultReporterConfig.NoColor {
				fmt.Printf("\t%s\n", output)
			} else {
				fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
			}
		}
	}
}
|
|
@ -0,0 +1,192 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BuildRunCommand wires up the default (unnamed) "ginkgo" subcommand, which
// compiles and runs test suites. It constructs the shared flag set, the
// notifier, the interrupt handler, and the suite runner, and binds them into
// a SpecRunner whose RunSpecs method is the command entry point.
func BuildRunCommand() *Command {
	commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
	notifier := NewNotifier(commandFlags)
	interruptHandler := interrupthandler.NewInterruptHandler()
	runner := &SpecRunner{
		commandFlags:     commandFlags,
		notifier:         notifier,
		interruptHandler: interruptHandler,
		suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
	}

	return &Command{
		Name:         "",
		FlagSet:      commandFlags.FlagSet,
		UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
		Usage: []string{
			"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
			"Any arguments after -- will be passed to the test.",
			"Accepts the following flags:",
		},
		Command: runner.RunSpecs,
	}
}
|
||||||
|
|
||||||
|
// SpecRunner implements the default "ginkgo" command: it discovers suites,
// compiles and runs them (optionally repeatedly / in random order), and
// reports results via the notifier.
type SpecRunner struct {
	commandFlags     *RunWatchAndBuildCommandFlags
	notifier         *Notifier
	interruptHandler *interrupthandler.InterruptHandler
	suiteRunner      *SuiteRunner
}
|
||||||
|
|
||||||
|
// RunSpecs is the entry point of the default "ginkgo" command. It discovers
// the requested suites, builds a test runner per suite, runs them (looping
// when --untilItFails is set), cleans up, and exits the process with an
// appropriate status code — it never returns normally.
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
	r.commandFlags.computeNodes()
	r.notifier.VerifyNotificationsAreAvailable()

	suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
	if len(skippedPackages) > 0 {
		fmt.Println("Will skip:")
		for _, skippedPackage := range skippedPackages {
			fmt.Println(" " + skippedPackage)
		}
	}

	if len(skippedPackages) > 0 && len(suites) == 0 {
		fmt.Println("All tests skipped! Exiting...")
		os.Exit(0)
	}

	if len(suites) == 0 {
		complainAndQuit("Found no test suites")
	}

	r.ComputeSuccinctMode(len(suites))

	t := time.Now()

	// One TestRunner per discovered suite, all sharing the command flags.
	runners := []*testrunner.TestRunner{}
	for _, suite := range suites {
		runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
	}

	numSuites := 0
	runResult := testrunner.PassingRunResult()
	if r.commandFlags.UntilItFails {
		// Keep rerunning (with a fresh seed and order each time) until a
		// failure occurs or the user interrupts.
		iteration := 0
		for {
			r.UpdateSeed()
			randomizedRunners := r.randomizeOrder(runners)
			runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
			iteration++

			if r.interruptHandler.WasInterrupted() {
				break
			}

			if runResult.Passed {
				fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
			} else {
				fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
				break
			}
		}
	} else {
		randomizedRunners := r.randomizeOrder(runners)
		runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
	}

	// Remove compiled test binaries regardless of outcome.
	for _, runner := range runners {
		runner.CleanUp()
	}

	fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))

	if runResult.Passed {
		if runResult.HasProgrammaticFocus {
			// Passing but focused suites exit non-zero so CI notices the focus.
			fmt.Printf("Test Suite Passed\n")
			fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
			os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
		} else {
			fmt.Printf("Test Suite Passed\n")
			os.Exit(0)
		}
	} else {
		fmt.Printf("Test Suite Failed\n")
		os.Exit(1)
	}
}
|
||||||
|
|
||||||
|
func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
|
||||||
|
if config.DefaultReporterConfig.Verbose {
|
||||||
|
config.DefaultReporterConfig.Succinct = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if numSuites == 1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
|
||||||
|
config.DefaultReporterConfig.Succinct = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *SpecRunner) UpdateSeed() {
|
||||||
|
if !r.commandFlags.wasSet("seed") {
|
||||||
|
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
|
||||||
|
if !r.commandFlags.RandomizeSuites {
|
||||||
|
return runners
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runners) <= 1 {
|
||||||
|
return runners
|
||||||
|
}
|
||||||
|
|
||||||
|
randomizedRunners := make([]*testrunner.TestRunner, len(runners))
|
||||||
|
randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
|
||||||
|
permutation := randomizer.Perm(len(runners))
|
||||||
|
for i, j := range permutation {
|
||||||
|
randomizedRunners[i] = runners[j]
|
||||||
|
}
|
||||||
|
return randomizedRunners
|
||||||
|
}
|
||||||
|
|
||||||
|
// orcMessage returns a progressively exasperated status line for
// --untilItFails runs: nothing for the first ten attempts, one of twenty
// canned messages for attempts 10-29, and a final plea from attempt 30 on.
func orcMessage(iteration int) string {
	switch {
	case iteration < 10:
		return ""
	case iteration >= 30:
		return "No, seriously... you can probably stop now.\n"
	default:
		quips := []string{
			"If at first you succeed...",
			"...try, try again.",
			"Looking good!",
			"Still good...",
			"I think your tests are fine....",
			"Yep, still passing",
			"Here we go again...",
			"Even the gophers are getting bored",
			"Did you try -race?",
			"Maybe you should stop now?",
			"I'm getting tired...",
			"What if I just made you a sandwich?",
			"Hit ^C, hit ^C, please hit ^C",
			"Make it stop. Please!",
			"Come on! Enough is enough!",
			"Dave, this conversation can serve no purpose anymore. Goodbye.",
			"Just what do you think you're doing, Dave? ",
			"I, Sisyphus",
			"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
			"I guess Einstein never tried to churn butter",
		}
		return quips[iteration-10] + "\n"
	}
}
|
121
vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
generated
vendored
Normal file
121
vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunWatchAndBuildCommandFlags holds the command-line flags shared by the
// ginkgo run, watch, and build subcommands. Which fields are actually
// registered on FlagSet depends on the mode passed to flags().
type RunWatchAndBuildCommandFlags struct {
	Recurse     bool
	Race        bool
	Cover       bool
	CoverPkg    string
	SkipPackage string
	Tags        string

	//for run and watch commands
	NumCPU         int
	NumCompilers   int
	ParallelStream bool
	Notify         bool
	AfterSuiteHook string
	AutoNodes      bool

	//only for run command
	KeepGoing       bool
	UntilItFails    bool
	RandomizeSuites bool

	//only for watch command
	Depth int

	FlagSet *flag.FlagSet
}
|
||||||
|
|
||||||
|
// Modes selecting which flag groups flags() registers; grouped into a single
// const block with iota per Go convention (values unchanged: 1, 2, 3).
const (
	runMode = iota + 1
	watchMode
	buildMode
)
|
||||||
|
|
||||||
|
func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||||
|
c := &RunWatchAndBuildCommandFlags{
|
||||||
|
FlagSet: flagSet,
|
||||||
|
}
|
||||||
|
c.flags(runMode)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||||
|
c := &RunWatchAndBuildCommandFlags{
|
||||||
|
FlagSet: flagSet,
|
||||||
|
}
|
||||||
|
c.flags(watchMode)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||||
|
c := &RunWatchAndBuildCommandFlags{
|
||||||
|
FlagSet: flagSet,
|
||||||
|
}
|
||||||
|
c.flags(buildMode)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
|
||||||
|
wasSet := false
|
||||||
|
c.FlagSet.Visit(func(f *flag.Flag) {
|
||||||
|
if f.Name == flagName {
|
||||||
|
wasSet = true
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return wasSet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RunWatchAndBuildCommandFlags) computeNodes() {
|
||||||
|
if c.wasSet("nodes") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.AutoNodes {
|
||||||
|
switch n := runtime.NumCPU(); {
|
||||||
|
case n <= 4:
|
||||||
|
c.NumCPU = n
|
||||||
|
default:
|
||||||
|
c.NumCPU = n - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// flags registers the flag definitions appropriate for the given mode
// (runMode, watchMode, or buildMode) on c.FlagSet. The base build-related
// flags are always registered; run/watch add parallelism and notification
// flags; run alone adds keepGoing/untilItFails/randomizeSuites; watch alone
// adds -depth. On Windows -stream defaults to true and -notify is omitted.
func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
	onWindows := (runtime.GOOS == "windows")

	c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
	c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
	c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
	c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
	c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
	c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")

	if mode == runMode || mode == watchMode {
		config.Flags(c.FlagSet, "", false)
		c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
		c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
		c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
		c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
		if !onWindows {
			c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
		}
		c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
	}

	if mode == runMode {
		c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
		c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
		c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
	}

	if mode == watchMode {
		c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
	}
}
|
|
@ -0,0 +1,172 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||||
|
)
|
||||||
|
|
||||||
|
// compilationInput is one unit of work for a compiler goroutine: the runner
// to compile and the channel on which to report the outcome.
type compilationInput struct {
	runner *testrunner.TestRunner
	result chan compilationOutput
}
|
||||||
|
|
||||||
|
// compilationOutput pairs a runner with its compile result (err is nil on
// success).
type compilationOutput struct {
	runner *testrunner.TestRunner
	err    error
}
|
||||||
|
|
||||||
|
// SuiteRunner compiles suites concurrently and runs them sequentially in
// order, reporting completion through the notifier and stopping early on
// interrupt.
type SuiteRunner struct {
	notifier         *Notifier
	interruptHandler *interrupthandler.InterruptHandler
}
|
||||||
|
|
||||||
|
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
|
||||||
|
return &SuiteRunner{
|
||||||
|
notifier: notifier,
|
||||||
|
interruptHandler: interruptHandler,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
|
||||||
|
//we return this to the consumer, it will return each runner in order as it compiles
|
||||||
|
compilationOutputs := make(chan compilationOutput, len(runners))
|
||||||
|
|
||||||
|
//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
|
||||||
|
//we read from these channels in order to ensure we run the suites in order
|
||||||
|
orderedCompilationOutputs := []chan compilationOutput{}
|
||||||
|
for _ = range runners {
|
||||||
|
orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
|
||||||
|
//we prefill the channel then close it, this ensures we compile things in the correct order
|
||||||
|
workPool := make(chan compilationInput, len(runners))
|
||||||
|
for i, runner := range runners {
|
||||||
|
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
|
||||||
|
}
|
||||||
|
close(workPool)
|
||||||
|
|
||||||
|
//pick a reasonable numCompilers
|
||||||
|
if numCompilers == 0 {
|
||||||
|
numCompilers = runtime.NumCPU()
|
||||||
|
}
|
||||||
|
|
||||||
|
//a WaitGroup to help us wait for all compilers to shut down
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(numCompilers)
|
||||||
|
|
||||||
|
//spin up the concurrent compilers
|
||||||
|
for i := 0; i < numCompilers; i++ {
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
for input := range workPool {
|
||||||
|
if r.interruptHandler.WasInterrupted() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if willCompile != nil {
|
||||||
|
willCompile(input.runner.Suite)
|
||||||
|
}
|
||||||
|
|
||||||
|
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
|
||||||
|
var err error
|
||||||
|
retries := 0
|
||||||
|
for retries <= 5 {
|
||||||
|
if r.interruptHandler.WasInterrupted() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = input.runner.Compile(); err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
retries++
|
||||||
|
}
|
||||||
|
|
||||||
|
input.result <- compilationOutput{input.runner, err}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
//read from the compilation output channels *in order* and send them to the caller
|
||||||
|
//close the compilationOutputs channel to tell the caller we're done
|
||||||
|
go func() {
|
||||||
|
defer close(compilationOutputs)
|
||||||
|
for _, orderedCompilationOutput := range orderedCompilationOutputs {
|
||||||
|
select {
|
||||||
|
case compilationOutput := <-orderedCompilationOutput:
|
||||||
|
compilationOutputs <- compilationOutput
|
||||||
|
case <-r.interruptHandler.C:
|
||||||
|
//interrupt detected, wait for the compilers to shut down then bail
|
||||||
|
//this ensure we clean up after ourselves as we don't leave any compilation processes running
|
||||||
|
wg.Wait()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return compilationOutputs
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunSuites compiles the runners concurrently (see compileInParallel) and
// runs each suite in order as its compilation finishes. A suite whose compile
// failed counts as a failing run. Per-suite results trigger notifications and
// the after-suite hook. Unless keepGoing is set, the first failure stops the
// run; with keepGoing, failed suites are listed at the end. Returns the
// merged result and the number of suites that ran.
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
	runResult := testrunner.PassingRunResult()

	compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)

	numSuitesThatRan := 0
	suitesThatFailed := []testsuite.TestSuite{}
	for compilationOutput := range compilationOutputs {
		if compilationOutput.err != nil {
			fmt.Print(compilationOutput.err.Error())
		}
		numSuitesThatRan++
		// A compile failure is treated as a failed suite run.
		suiteRunResult := testrunner.FailingRunResult()
		if compilationOutput.err == nil {
			suiteRunResult = compilationOutput.runner.Run()
		}
		r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
		r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
		runResult = runResult.Merge(suiteRunResult)
		if !suiteRunResult.Passed {
			suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
			if !keepGoing {
				break
			}
		}
		// Blank line between suites (unless this was the last or succinct mode is on).
		if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
			fmt.Println("")
		}
	}

	if keepGoing && !runResult.Passed {
		r.listFailedSuites(suitesThatFailed)
	}

	return runResult, numSuitesThatRan
}
|
||||||
|
|
||||||
|
func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
|
||||||
|
fmt.Println("")
|
||||||
|
fmt.Println("There were failures detected in the following suites:")
|
||||||
|
|
||||||
|
maxPackageNameLength := 0
|
||||||
|
for _, suite := range suitesThatFailed {
|
||||||
|
if len(suite.PackageName) > maxPackageNameLength {
|
||||||
|
maxPackageNameLength = len(suite.PackageName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
|
||||||
|
|
||||||
|
for _, suite := range suitesThatFailed {
|
||||||
|
if config.DefaultReporterConfig.NoColor {
|
||||||
|
fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,52 @@
|
||||||
|
package testrunner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// logWriter prefixes every complete line written through it with "[node] "
// before forwarding to the target writer. A trailing partial line (no newline
// yet) is buffered until more data arrives or Close is called.
type logWriter struct {
	buffer *bytes.Buffer
	lock   *sync.Mutex
	log    *log.Logger
}

// newLogWriter returns a logWriter that emits prefixed lines to target,
// tagging them with the given parallel node number.
func newLogWriter(target io.Writer, node int) *logWriter {
	return &logWriter{
		buffer: &bytes.Buffer{},
		lock:   &sync.Mutex{},
		log:    log.New(target, fmt.Sprintf("[%d] ", node), 0),
	}
}

// Write appends data to the internal buffer, flushes every complete line with
// the node prefix, and retains the trailing partial line for the next call.
// It always reports the full input length as consumed.
func (w *logWriter) Write(data []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.buffer.Write(data)
	pieces := strings.Split(w.buffer.String(), "\n")

	// All but the final element are complete lines and can be emitted now.
	for _, piece := range pieces[:len(pieces)-1] {
		w.log.Println(piece)
	}

	// Keep only the unterminated remainder.
	w.buffer.Reset()
	w.buffer.Write([]byte(pieces[len(pieces)-1]))
	return len(data), nil
}

// Close flushes any buffered partial line as a final prefixed line.
// It never fails; the error return satisfies io.Closer.
func (w *logWriter) Close() error {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.buffer.Len() > 0 {
		w.log.Println(w.buffer.String())
	}
	return nil
}
|
|
@ -0,0 +1,27 @@
|
||||||
|
package testrunner
|
||||||
|
|
||||||
|
// RunResult summarizes the outcome of running one test suite (or one
// parallel node of a suite).
type RunResult struct {
	Passed               bool // suite exited 0 or with the focus exit code
	HasProgrammaticFocus bool // suite exited with the focus exit code
}

// PassingRunResult returns a successful result with no programmatic focus.
func PassingRunResult() RunResult {
	return RunResult{Passed: true}
}

// FailingRunResult returns a failed result with no programmatic focus.
func FailingRunResult() RunResult {
	return RunResult{}
}

// Merge combines two results: the merged run passed only if both passed, and
// carries programmatic focus if either did.
func (r RunResult) Merge(o RunResult) RunResult {
	merged := RunResult{
		Passed:               r.Passed && o.Passed,
		HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
	}
	return merged
}
|
|
@ -0,0 +1,460 @@
|
||||||
|
package testrunner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||||
|
"github.com/onsi/ginkgo/internal/remote"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TestRunner struct {
|
||||||
|
Suite testsuite.TestSuite
|
||||||
|
|
||||||
|
compiled bool
|
||||||
|
compilationTargetPath string
|
||||||
|
|
||||||
|
numCPU int
|
||||||
|
parallelStream bool
|
||||||
|
race bool
|
||||||
|
cover bool
|
||||||
|
coverPkg string
|
||||||
|
tags string
|
||||||
|
additionalArgs []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
|
||||||
|
runner := &TestRunner{
|
||||||
|
Suite: suite,
|
||||||
|
numCPU: numCPU,
|
||||||
|
parallelStream: parallelStream,
|
||||||
|
race: race,
|
||||||
|
cover: cover,
|
||||||
|
coverPkg: coverPkg,
|
||||||
|
tags: tags,
|
||||||
|
additionalArgs: additionalArgs,
|
||||||
|
}
|
||||||
|
|
||||||
|
if !suite.Precompiled {
|
||||||
|
dir, err := ioutil.TempDir("", "ginkgo")
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
|
||||||
|
}
|
||||||
|
runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
|
||||||
|
}
|
||||||
|
|
||||||
|
return runner
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile builds the suite's test binary into the temporary target path
// reserved by New. Delegates to CompileTo; a no-op for precompiled suites.
func (t *TestRunner) Compile() error {
	return t.CompileTo(t.compilationTargetPath)
}
|
||||||
|
|
||||||
|
// CompileTo compiles the suite's test binary to path with `go test -c -i -o`,
// honoring the runner's race/cover/tags settings. Compilation happens at most
// once per runner (t.compiled latches), and precompiled suites are skipped.
// If the toolchain ignores -o (old Go versions), the binary produced in the
// package directory is moved — or copied, if rename fails — into place.
func (t *TestRunner) CompileTo(path string) error {
	if t.compiled {
		return nil
	}

	if t.Suite.Precompiled {
		return nil
	}

	args := []string{"test", "-c", "-i", "-o", path}
	if t.race {
		args = append(args, "-race")
	}
	if t.cover || t.coverPkg != "" {
		// atomic covermode so parallel nodes can merge profiles safely
		args = append(args, "-cover", "-covermode=atomic")
	}
	if t.coverPkg != "" {
		args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
	}
	if t.tags != "" {
		args = append(args, fmt.Sprintf("-tags=%s", t.tags))
	}

	cmd := exec.Command("go", args...)

	// Run inside the package directory so `go test` resolves the package;
	// fixCompilationOutput rewrites the resulting relative error paths.
	cmd.Dir = t.Suite.Path

	output, err := cmd.CombinedOutput()

	if err != nil {
		fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
		if len(output) > 0 {
			return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
		}
		return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
	}

	if fileExists(path) == false {
		compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
		if fileExists(compiledFile) {
			// seems like we are on an old go version that does not support the -o flag on go test
			// move the compiled test file to the desired location by hand
			err = os.Rename(compiledFile, path)
			if err != nil {
				// We cannot move the file, perhaps because the source and destination
				// are on different partitions. We can copy the file, however.
				err = copyFile(compiledFile, path)
				if err != nil {
					return fmt.Errorf("Failed to copy compiled file: %s", err)
				}
			}
		} else {
			return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
		}
	}

	t.compiled = true

	return nil
}
|
||||||
|
|
||||||
|
// fileExists reports whether path exists. Note that a Stat failure other
// than "not exist" (e.g. a permission error) is treated as "exists", which
// is what CompileTo's fallback logic relies on.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	if err == nil {
		return true
	}
	return !os.IsNotExist(err)
}
|
||||||
|
|
||||||
|
// copyFile copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file. The destination inherits src's permission bits.
//
// The result is a named return value so the deferred Close can surface a
// late write error. Previously the return was unnamed, so `err = closeErr`
// in the deferred func assigned a dead local and Close errors were silently
// dropped — that is the bug fixed here.
func copyFile(src, dst string) (err error) {
	srcInfo, err := os.Stat(src)
	if err != nil {
		return err
	}
	mode := srcInfo.Mode()

	in, err := os.Open(src)
	if err != nil {
		return err
	}

	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}

	// Report the Close error unless an earlier error is already pending.
	defer func() {
		closeErr := out.Close()
		if err == nil {
			err = closeErr
		}
	}()

	_, err = io.Copy(out, in)
	if err != nil {
		return err
	}

	err = out.Sync()
	if err != nil {
		return err
	}

	return out.Chmod(mode)
}
|
||||||
|
|
||||||
|
/*
go test -c -i spits package.test out into the cwd. there's no way to change this.

to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.

unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.

this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working.

fixCompilationOutput..... rewrites the output to fix the paths.

yeah......
*/
func fixCompilationOutput(output string, relToPath string) string {
	fileLocation := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
	rewritten := strings.Split(output, "\n")
	for i, line := range rewritten {
		loc := fileLocation.FindStringSubmatchIndex(line)
		if loc == nil {
			continue
		}

		// loc[2]:loc[3] bounds the captured relative .go file path; splice
		// in the package-relative prefix and keep the rest of the line.
		rewritten[i] = filepath.Join(relToPath, line[loc[2]:loc[3]]) + line[loc[3]:]
	}
	return strings.Join(rewritten, "\n")
}
|
||||||
|
|
||||||
|
func (t *TestRunner) Run() RunResult {
|
||||||
|
if t.Suite.IsGinkgo {
|
||||||
|
if t.numCPU > 1 {
|
||||||
|
if t.parallelStream {
|
||||||
|
return t.runAndStreamParallelGinkgoSuite()
|
||||||
|
} else {
|
||||||
|
return t.runParallelGinkgoSuite()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return t.runSerialGinkgoSuite()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return t.runGoTestSuite()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CleanUp removes the temporary directory holding the compiled test binary.
// Precompiled suites are left untouched — their binary is not ours to delete.
func (t *TestRunner) CleanUp() {
	if t.Suite.Precompiled {
		return
	}
	os.RemoveAll(filepath.Dir(t.compilationTargetPath))
}
|
||||||
|
|
||||||
|
// runSerialGinkgoSuite runs the compiled suite on a single node, forwarding
// the current ginkgo/reporter config as CLI flags and streaming straight to
// stdout.
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
	ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
}

// runGoTestSuite runs a vanilla (non-Ginkgo) compiled test binary with
// -test.v, streaming to stdout.
func (t *TestRunner) runGoTestSuite() RunResult {
	return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
}
|
||||||
|
|
||||||
|
// runAndStreamParallelGinkgoSuite launches numCPU copies of the suite binary,
// interleaving each node's output on stdout with a "[node] " prefix (via
// logWriter), and merges the per-node results into one RunResult.
func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
	completions := make(chan RunResult)
	writers := make([]*logWriter, t.numCPU)

	server, err := remote.NewServer(t.numCPU)
	if err != nil {
		panic("Failed to start parallel spec server")
	}

	server.Start()
	defer server.Close()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		// Mutate the package-level config before serializing it into flags;
		// each node gets its own ParallelNode number (1-based).
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU
		config.GinkgoConfig.SyncHost = server.Address()

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		writers[cpu] = newLogWriter(os.Stdout, cpu+1)

		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)

		// Liveness callback for the sync server: a node is alive until its
		// process has exited (ProcessState is nil until Wait completes).
		server.RegisterAlive(cpu+1, func() bool {
			if cmd.ProcessState == nil {
				return true
			}
			return !cmd.ProcessState.Exited()
		})

		go t.run(cmd, completions)
	}

	res := PassingRunResult()

	// Collect exactly one completion per node, merging pass/fail state.
	for cpu := 0; cpu < t.numCPU; cpu++ {
		res = res.Merge(<-completions)
	}

	// All nodes are done; flush any buffered partial lines.
	for _, writer := range writers {
		writer.Close()
	}

	os.Stdout.Sync()

	if t.cover || t.coverPkg != "" {
		t.combineCoverprofiles()
	}

	return res
}
|
||||||
|
|
||||||
|
// runParallelGinkgoSuite launches numCPU copies of the suite binary with an
// aggregating reporter: per-node raw output is captured into buffers while a
// remote aggregator renders a merged report. If the aggregator fails to
// report back within a second of all nodes finishing, the buffered raw
// output is dumped as a fallback.
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
	result := make(chan bool)
	completions := make(chan RunResult)
	writers := make([]*logWriter, t.numCPU)
	reports := make([]*bytes.Buffer, t.numCPU)

	stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
	aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)

	server, err := remote.NewServer(t.numCPU)
	if err != nil {
		panic("Failed to start parallel spec server")
	}
	server.RegisterReporters(aggregator)
	server.Start()
	defer server.Close()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		// Per-node config is serialized into CLI flags; StreamHost routes
		// spec events to the aggregator on the server.
		config.GinkgoConfig.ParallelNode = cpu + 1
		config.GinkgoConfig.ParallelTotal = t.numCPU
		config.GinkgoConfig.SyncHost = server.Address()
		config.GinkgoConfig.StreamHost = server.Address()

		ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)

		// Raw node output is buffered rather than streamed; shown only on
		// aggregator timeout below.
		reports[cpu] = &bytes.Buffer{}
		writers[cpu] = newLogWriter(reports[cpu], cpu+1)

		cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)

		// Liveness callback: node is alive until its process has exited.
		server.RegisterAlive(cpu+1, func() bool {
			if cmd.ProcessState == nil {
				return true
			}
			return !cmd.ProcessState.Exited()
		})

		go t.run(cmd, completions)
	}

	res := PassingRunResult()

	for cpu := 0; cpu < t.numCPU; cpu++ {
		res = res.Merge(<-completions)
	}

	//all test processes are done, at this point
	//we should be able to wait for the aggregator to tell us that it's done

	select {
	case <-result:
		fmt.Println("")
	case <-time.After(time.Second):
		//the aggregator never got back to us! something must have gone wrong
		fmt.Println(`
-------------------------------------------------------------------
| |
| Ginkgo timed out waiting for all parallel nodes to report back! |
| |
-------------------------------------------------------------------
`)

		os.Stdout.Sync()

		// Flush and dump the buffered raw per-node output so the user still
		// sees what happened.
		for _, writer := range writers {
			writer.Close()
		}

		for _, report := range reports {
			fmt.Print(report.String())
		}

		os.Stdout.Sync()
	}

	if t.cover || t.coverPkg != "" {
		t.combineCoverprofiles()
	}

	return res
}
|
||||||
|
|
||||||
|
// cmd builds the exec.Cmd for one node of the compiled suite binary:
// a generous test timeout, an optional per-node coverprofile flag, the
// serialized ginkgo flags, and any user-supplied pass-through args. stdout
// and stderr both go to stream.
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
	args := []string{"--test.timeout=24h"}
	if t.cover || t.coverPkg != "" {
		coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
		if t.numCPU > 1 {
			// Suffix with the node number so parallel nodes don't clobber
			// each other; merged later by combineCoverprofiles.
			coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
		}
		args = append(args, coverprofile)
	}

	args = append(args, ginkgoArgs...)
	args = append(args, t.additionalArgs...)

	path := t.compilationTargetPath
	if t.Suite.Precompiled {
		// Precompiled suites keep their binary next to the package source.
		path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
	}

	cmd := exec.Command(path, args...)

	cmd.Dir = t.Suite.Path
	cmd.Stderr = stream
	cmd.Stdout = stream

	return cmd
}
|
||||||
|
|
||||||
|
// run starts cmd, waits for it, and derives a RunResult from its exit code:
// 0 passes, GINKGO_FOCUS_EXIT_CODE passes-with-focus, anything else fails.
// If completions is non-nil the result is also sent there (used when run is
// launched as a goroutine per parallel node).
func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
	var res RunResult

	// Always deliver a result (the zero value means "failed") even on early
	// return, so parallel collectors never block.
	defer func() {
		if completions != nil {
			completions <- res
		}
	}()

	err := cmd.Start()
	if err != nil {
		fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
		return res
	}

	// Wait's error is deliberately ignored: a non-zero exit is expected for
	// failing suites and the status is read from ProcessState below.
	cmd.Wait()
	// Unix-specific: Sys() is a syscall.WaitStatus on the supported platforms.
	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
	res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)

	return res
}
|
||||||
|
|
||||||
|
// combineCoverprofiles merges the per-node coverprofiles written by parallel
// runs (PackageName.coverprofile.N) into a single PackageName.coverprofile,
// summing hit counts per source line. The per-node files are removed as they
// are read; if any node's profile is missing, merging is silently skipped.
func (t *TestRunner) combineCoverprofiles() {
	profiles := []string{}
	for cpu := 1; cpu <= t.numCPU; cpu++ {
		coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
		coverFile = filepath.Join(t.Suite.Path, coverFile)
		coverProfile, err := ioutil.ReadFile(coverFile)
		os.Remove(coverFile)

		if err == nil {
			profiles = append(profiles, string(coverProfile))
		}
	}

	// Only merge a complete set — a partial merge would under-report.
	if len(profiles) != t.numCPU {
		return
	}

	lines := map[string]int{}
	lineOrder := []string{}
	for i, coverProfile := range profiles {
		// Skip each profile's "mode:" header line ([1:]).
		for _, line := range strings.Split(string(coverProfile), "\n")[1:] {
			if len(line) == 0 {
				continue
			}
			// A profile line is "<file:range numStmts> <count>"; the final
			// space-separated field is the hit count.
			components := strings.Split(line, " ")
			count, _ := strconv.Atoi(components[len(components)-1])
			prefix := strings.Join(components[0:len(components)-1], " ")
			lines[prefix] += count
			// Preserve the first profile's line order in the output.
			if i == 0 {
				lineOrder = append(lineOrder, prefix)
			}
		}
	}

	output := []string{"mode: atomic"}
	for _, line := range lineOrder {
		output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
	}
	finalOutput := strings.Join(output, "\n")
	ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
}
|
|
@ -0,0 +1,116 @@
|
||||||
|
package testsuite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSuite describes a single runnable test package.
type TestSuite struct {
	Path        string // relative ("./"-prefixed) path to the package directory
	PackageName string // base name of the package directory (and of the .test binary)
	IsGinkgo    bool   // true when a _test.go file in the package references ginkgo
	Precompiled bool   // true when the suite wraps an existing executable .test binary
}
|
||||||
|
|
||||||
|
func PrecompiledTestSuite(path string) (TestSuite, error) {
|
||||||
|
info, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return TestSuite{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.IsDir() {
|
||||||
|
return TestSuite{}, errors.New("this is a directory, not a file")
|
||||||
|
}
|
||||||
|
|
||||||
|
if filepath.Ext(path) != ".test" {
|
||||||
|
return TestSuite{}, errors.New("this is not a .test binary")
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.Mode()&0111 == 0 {
|
||||||
|
return TestSuite{}, errors.New("this is not executable")
|
||||||
|
}
|
||||||
|
|
||||||
|
dir := relPath(filepath.Dir(path))
|
||||||
|
packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
|
||||||
|
|
||||||
|
return TestSuite{
|
||||||
|
Path: dir,
|
||||||
|
PackageName: packageName,
|
||||||
|
IsGinkgo: true,
|
||||||
|
Precompiled: true,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func SuitesInDir(dir string, recurse bool) []TestSuite {
|
||||||
|
suites := []TestSuite{}
|
||||||
|
|
||||||
|
// "This change will only be enabled if the go command is run with
|
||||||
|
// GO15VENDOREXPERIMENT=1 in its environment."
|
||||||
|
// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC
|
||||||
|
vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
|
||||||
|
if (vendorExperiment == "1") && path.Base(dir) == "vendor" {
|
||||||
|
return suites
|
||||||
|
}
|
||||||
|
|
||||||
|
files, _ := ioutil.ReadDir(dir)
|
||||||
|
re := regexp.MustCompile(`_test\.go$`)
|
||||||
|
for _, file := range files {
|
||||||
|
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||||
|
suites = append(suites, New(dir, files))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if recurse {
|
||||||
|
re = regexp.MustCompile(`^[._]`)
|
||||||
|
for _, file := range files {
|
||||||
|
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||||
|
suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return suites
|
||||||
|
}
|
||||||
|
|
||||||
|
// relPath rewrites dir as an explicit "./"-prefixed path relative to the
// current working directory. Errors from Abs/Getwd/Rel are ignored; the
// function always returns a best-effort path.
func relPath(dir string) string {
	abs, _ := filepath.Abs(dir)
	cwd, _ := os.Getwd()
	rel, _ := filepath.Rel(cwd, filepath.Clean(abs))
	return "." + string(filepath.Separator) + rel
}
|
||||||
|
|
||||||
|
// New builds a TestSuite for the package in dir. files must be dir's
// directory listing; it is scanned to decide whether the suite uses ginkgo.
func New(dir string, files []os.FileInfo) TestSuite {
	return TestSuite{
		Path:        relPath(dir),
		PackageName: packageNameForSuite(dir),
		IsGinkgo:    filesHaveGinkgoSuite(dir, files),
	}
}
|
||||||
|
|
||||||
|
// packageNameForSuite derives the suite's package name from the base name of
// the directory's absolute path (errors from Abs are ignored).
func packageNameForSuite(dir string) string {
	absPath, _ := filepath.Abs(dir)
	return filepath.Base(absPath)
}
|
||||||
|
|
||||||
|
// filesHaveGinkgoSuite reports whether any _test.go file among files
// references ginkgo — either by declaring `package ginkgo` or by importing a
// path ending in /ginkgo". Read errors are ignored (unreadable files simply
// don't match).
func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
	reTestFile := regexp.MustCompile(`_test\.go$`)
	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)

	for _, file := range files {
		if file.IsDir() || !reTestFile.MatchString(file.Name()) {
			continue
		}
		contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
		if reGinkgo.Match(contents) {
			return true
		}
	}

	return false
}
|
|
@ -0,0 +1,38 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BuildUnfocusCommand wires up the `ginkgo unfocus` (alias: `ginkgo blur`)
// CLI subcommand, which strips the F-prefix from focused specs under the
// current directory.
func BuildUnfocusCommand() *Command {
	return &Command{
		Name:         "unfocus",
		AltName:      "blur",
		FlagSet:      flag.NewFlagSet("unfocus", flag.ExitOnError),
		UsageCommand: "ginkgo unfocus (or ginkgo blur)",
		Usage: []string{
			"Recursively unfocuses any focused tests under the current directory",
		},
		Command: unfocusSpecs,
	}
}
|
||||||
|
|
||||||
|
func unfocusSpecs([]string, []string) {
|
||||||
|
unfocus("Describe")
|
||||||
|
unfocus("Context")
|
||||||
|
unfocus("It")
|
||||||
|
unfocus("Measure")
|
||||||
|
unfocus("DescribeTable")
|
||||||
|
unfocus("Entry")
|
||||||
|
}
|
||||||
|
|
||||||
|
// unfocus runs gofmt's rewrite rule ("FX -> X") in-place over the current
// tree to strip the focus prefix from the given component; any gofmt output
// is echoed. Errors from gofmt are deliberately ignored (best-effort).
func unfocus(component string) {
	fmt.Printf("Removing F%s...\n", component)
	cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
	out, _ := cmd.CombinedOutput()
	if string(out) != "" {
		println(string(out))
	}
}
|
|
@ -0,0 +1,23 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BuildVersionCommand wires up the `ginkgo version` CLI subcommand.
func BuildVersionCommand() *Command {
	return &Command{
		Name:         "version",
		FlagSet:      flag.NewFlagSet("version", flag.ExitOnError),
		UsageCommand: "ginkgo version",
		Usage: []string{
			"Print Ginkgo's version",
		},
		Command: printVersion,
	}
}

// printVersion prints the ginkgo version baked into the config package.
// The unused parameters satisfy the Command callback signature.
func printVersion([]string, []string) {
	fmt.Printf("Ginkgo Version %s\n", config.VERSION)
}
|
|
@ -0,0 +1,22 @@
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import "sort"
|
||||||
|
|
||||||
|
// Delta describes what changed between two successive scans of the watched
// suites: which packages changed on disk, and which suites appeared,
// disappeared, or were affected by a change.
type Delta struct {
	ModifiedPackages []string

	NewSuites     []*Suite
	RemovedSuites []*Suite
	// modifiedSuites is unexported; consumers read it (sorted) via
	// ModifiedSuites.
	modifiedSuites []*Suite
}

// DescendingByDelta sorts suites by their Delta() value, largest first.
type DescendingByDelta []*Suite

func (a DescendingByDelta) Len() int           { return len(a) }
func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }

// ModifiedSuites returns the modified suites ordered by descending delta.
// Note: it sorts the underlying slice in place.
func (d Delta) ModifiedSuites() []*Suite {
	sort.Sort(DescendingByDelta(d.modifiedSuites))
	return d.modifiedSuites
}
|
|
@ -0,0 +1,71 @@
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SuiteErrors collects per-suite errors encountered while building watch
// state.
type SuiteErrors map[testsuite.TestSuite]error

// DeltaTracker maintains watch state for a set of suites and their package
// dependency graphs (explored up to maxDepth), and computes a Delta between
// successive scans.
type DeltaTracker struct {
	maxDepth      int
	suites        map[string]*Suite // keyed by suite path
	packageHashes *PackageHashes    // shared hash registry across suites
}

// NewDeltaTracker returns an empty tracker that explores dependencies up to
// maxDepth levels.
func NewDeltaTracker(maxDepth int) *DeltaTracker {
	return &DeltaTracker{
		maxDepth:      maxDepth,
		packageHashes: NewPackageHashes(),
		suites:        map[string]*Suite{},
	}
}

// Delta reconciles the tracker with the currently-discovered suites: it
// rehashes all tracked packages, classifies known suites as modified or
// removed, prunes hashes no longer referenced, and registers any new suites.
// Suites that fail to initialize are reported in the returned SuiteErrors.
func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
	errors = SuiteErrors{}
	delta.ModifiedPackages = d.packageHashes.CheckForChanges()

	// Index the freshly-discovered suite paths for membership tests.
	providedSuitePaths := map[string]bool{}
	for _, suite := range suites {
		providedSuitePaths[suite.Path] = true
	}

	// Usage tracking lets StopTrackingUsageAndPrune drop hashes that no
	// surviving suite touched during this pass.
	d.packageHashes.StartTrackingUsage()

	for _, suite := range d.suites {
		if providedSuitePaths[suite.Suite.Path] {
			if suite.Delta() > 0 {
				delta.modifiedSuites = append(delta.modifiedSuites, suite)
			}
		} else {
			delta.RemovedSuites = append(delta.RemovedSuites, suite)
		}
	}

	d.packageHashes.StopTrackingUsageAndPrune()

	// Register suites we have never seen before.
	for _, suite := range suites {
		_, ok := d.suites[suite.Path]
		if !ok {
			s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
			if err != nil {
				errors[suite] = err
				continue
			}
			d.suites[suite.Path] = s
			delta.NewSuites = append(delta.NewSuites, s)
		}
	}

	return delta, errors
}

// WillRun records that suite is about to run, resetting its modification
// state and recomputing its dependency set. Returns an error for suites the
// tracker does not know about.
func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
	s, ok := d.suites[suite.Path]
	if !ok {
		return fmt.Errorf("unknown suite %s", suite.Path)
	}

	return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
}
|
|
@ -0,0 +1,91 @@
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go/build"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ginkgoAndGomegaFilter matches package directories that belong to ginkgo or
// gomega themselves; those are excluded from dependency tracking.
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)

// Dependencies maps each discovered package directory to the depth at which
// it was first reached from the root package.
type Dependencies struct {
	deps map[string]int
}

// NewDependencies walks the import graph of the package at path breadth-first,
// up to maxDepth levels, recording every non-GOROOT, non-ginkgo/gomega
// dependency directory. maxDepth of 0 yields an empty set.
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
	d := Dependencies{deps: map[string]int{}}

	if maxDepth == 0 {
		return d, nil
	}

	if err := d.seedWithDepsForPackageAtPath(path); err != nil {
		return d, err
	}

	for depth := 1; depth < maxDepth; depth++ {
		known := len(d.deps)
		d.addDepsForDepth(depth)
		if known == len(d.deps) {
			// Fixed point: no new packages discovered at this depth.
			break
		}
	}

	return d, nil
}

// Dependencies exposes the directory -> depth map.
func (d Dependencies) Dependencies() map[string]int {
	return d.deps
}

// seedWithDepsForPackageAtPath records the depth-1 dependencies of the root
// package, including its test-only and external-test imports.
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
	pkg, err := build.ImportDir(path, 0)
	if err != nil {
		return err
	}

	d.resolveAndAdd(pkg.Imports, 1)
	d.resolveAndAdd(pkg.TestImports, 1)
	d.resolveAndAdd(pkg.XTestImports, 1)

	// The root package must not appear in its own dependency set.
	delete(d.deps, pkg.Dir)
	return nil
}

// addDepsForDepth expands every package recorded at depth into its own
// imports, recorded at depth+1.
func (d Dependencies) addDepsForDepth(depth int) {
	for dep, depDepth := range d.deps {
		if depDepth != depth {
			continue
		}
		d.addDepsForDep(dep, depth+1)
	}
}

// addDepsForDep resolves dep's imports and records them at the given depth.
// Import failures are reported to stderr-ish output and skipped.
func (d Dependencies) addDepsForDep(dep string, depth int) {
	pkg, err := build.ImportDir(dep, 0)
	if err != nil {
		println(err.Error())
		return
	}
	d.resolveAndAdd(pkg.Imports, depth)
}

// resolveAndAdd resolves import paths to directories, keeping only those
// outside GOROOT that are not part of ginkgo/gomega.
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
	for _, dep := range deps {
		pkg, err := build.Import(dep, ".", 0)
		if err != nil {
			continue
		}
		if !pkg.Goroot && !ginkgoAndGomegaFilter.MatchString(pkg.Dir) {
			d.addDepIfNotPresent(pkg.Dir, depth)
		}
	}
}

// addDepIfNotPresent records dep at depth only on first sight, preserving
// the shallowest depth at which a directory was reached.
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
	if _, ok := d.deps[dep]; !ok {
		d.deps[dep] = depth
	}
}
|
|
@ -0,0 +1,103 @@
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var goRegExp = regexp.MustCompile(`\.go$`)
var goTestRegExp = regexp.MustCompile(`_test\.go$`)

// PackageHash fingerprints the .go files in one package directory, tracking
// code files and _test.go files separately so watchers can distinguish
// "source changed" from "tests changed".
type PackageHash struct {
	CodeModifiedTime time.Time // last observed change to non-test .go files
	TestModifiedTime time.Time // last observed change to _test.go files
	Deleted          bool      // directory no longer readable

	path     string
	codeHash string // concatenated name/size/mtime fingerprints of code files
	testHash string // same for test files (includes codeHash; see computeHashes)
}

// NewPackageHash snapshots the package at path. Modification times are left
// at their zero value until CheckForChanges observes an actual change.
func NewPackageHash(path string) *PackageHash {
	p := &PackageHash{
		path: path,
	}

	p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()

	return p
}

// CheckForChanges rehashes the directory and reports whether anything
// changed since the previous call, updating the exported modification
// times and Deleted flag as a side effect.
func (p *PackageHash) CheckForChanges() bool {
	codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()

	if deleted {
		// First observation of the deletion stamps both mod times with now.
		if p.Deleted == false {
			t := time.Now()
			p.CodeModifiedTime = t
			p.TestModifiedTime = t
		}
		p.Deleted = true
		return true
	}

	modified := false
	p.Deleted = false

	if p.codeHash != codeHash {
		p.CodeModifiedTime = codeModifiedTime
		modified = true
	}
	if p.testHash != testHash {
		p.TestModifiedTime = testModifiedTime
		modified = true
	}

	p.codeHash = codeHash
	p.testHash = testHash
	return modified
}

// computeHashes scans the directory once, fingerprinting test and non-test
// .go files separately and tracking the newest mtime in each class. A read
// failure of the directory is reported as deleted. The test hash folds in
// the code hash so that a code change also counts as a test change.
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
	infos, err := ioutil.ReadDir(p.path)

	if err != nil {
		deleted = true
		return
	}

	for _, info := range infos {
		if info.IsDir() {
			continue
		}

		// _test.go must be checked first: it also matches the .go pattern.
		if goTestRegExp.Match([]byte(info.Name())) {
			testHash += p.hashForFileInfo(info)
			if info.ModTime().After(testModifiedTime) {
				testModifiedTime = info.ModTime()
			}
			continue
		}

		if goRegExp.Match([]byte(info.Name())) {
			codeHash += p.hashForFileInfo(info)
			if info.ModTime().After(codeModifiedTime) {
				codeModifiedTime = info.ModTime()
			}
		}
	}

	// A code change implies the tests are stale too.
	testHash += codeHash
	if codeModifiedTime.After(testModifiedTime) {
		testModifiedTime = codeModifiedTime
	}

	return
}

// hashForFileInfo fingerprints one file by name, size, and mtime — cheap and
// sufficient for change detection (no file contents are read).
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
	return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
}
|
|
@ -0,0 +1,82 @@
|
||||||
|
package watch

import (
	"path/filepath"
	"sync"
)

// PackageHashes is a concurrency-safe registry of PackageHash trackers,
// keyed by absolute package directory path. It optionally tracks which
// paths are still referenced so stale entries can be pruned (see
// StartTrackingUsage / StopTrackingUsageAndPrune).
type PackageHashes struct {
	PackageHashes map[string]*PackageHash // one tracker per absolute package path
	usedPaths     map[string]bool         // non-nil only while usage tracking is active
	lock          *sync.Mutex             // guards both maps
}
|
||||||
|
|
||||||
|
func NewPackageHashes() *PackageHashes {
|
||||||
|
return &PackageHashes{
|
||||||
|
PackageHashes: map[string]*PackageHash{},
|
||||||
|
usedPaths: nil,
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) CheckForChanges() []string {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
modified := []string{}
|
||||||
|
|
||||||
|
for _, packageHash := range p.PackageHashes {
|
||||||
|
if packageHash.CheckForChanges() {
|
||||||
|
modified = append(modified, packageHash.path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return modified
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) Add(path string) *PackageHash {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
path, _ = filepath.Abs(path)
|
||||||
|
_, ok := p.PackageHashes[path]
|
||||||
|
if !ok {
|
||||||
|
p.PackageHashes[path] = NewPackageHash(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.usedPaths != nil {
|
||||||
|
p.usedPaths[path] = true
|
||||||
|
}
|
||||||
|
return p.PackageHashes[path]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) Get(path string) *PackageHash {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
path, _ = filepath.Abs(path)
|
||||||
|
if p.usedPaths != nil {
|
||||||
|
p.usedPaths[path] = true
|
||||||
|
}
|
||||||
|
return p.PackageHashes[path]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) StartTrackingUsage() {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
p.usedPaths = map[string]bool{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PackageHashes) StopTrackingUsageAndPrune() {
|
||||||
|
p.lock.Lock()
|
||||||
|
defer p.lock.Unlock()
|
||||||
|
|
||||||
|
for path := range p.PackageHashes {
|
||||||
|
if !p.usedPaths[path] {
|
||||||
|
delete(p.PackageHashes, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.usedPaths = nil
|
||||||
|
}
|
|
@ -0,0 +1,87 @@
|
||||||
|
package watch

import (
	"fmt"
	"math"
	"time"

	"github.com/onsi/ginkgo/ginkgo/testsuite"
)

// Suite wraps a testsuite.TestSuite with the bookkeeping the watcher needs:
// when it last ran and which packages it (transitively) depends on.
type Suite struct {
	Suite        testsuite.TestSuite // the underlying suite being watched
	RunTime      time.Time           // last time the suite was run (zero until first run)
	Dependencies Dependencies        // dependency packages mapped to their depth

	// sharedPackageHashes is the registry shared across all watched suites,
	// so each package directory is hashed only once.
	sharedPackageHashes *PackageHashes
}
|
||||||
|
|
||||||
|
func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
|
||||||
|
deps, err := NewDependencies(suite.Path, maxDepth)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sharedPackageHashes.Add(suite.Path)
|
||||||
|
for dep := range deps.Dependencies() {
|
||||||
|
sharedPackageHashes.Add(dep)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Suite{
|
||||||
|
Suite: suite,
|
||||||
|
Dependencies: deps,
|
||||||
|
|
||||||
|
sharedPackageHashes: sharedPackageHashes,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delta returns a staleness score for the suite: how recently its own
// package or its dependencies changed relative to the last run. Higher
// means more recently modified; used to prioritize which suites to rerun.
func (s *Suite) Delta() float64 {
	// The suite's own package (tests included) is weighted 1000x so a direct
	// change always dominates dependency changes — presumably a deliberate
	// prioritization constant; confirm against DeltaTracker's ordering.
	delta := s.delta(s.Suite.Path, true, 0) * 1000
	// Dependencies contribute with depth-attenuated weight (see delta()).
	for dep, depth := range s.Dependencies.Dependencies() {
		delta += s.delta(dep, false, depth)
	}
	return delta
}
|
||||||
|
|
||||||
|
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
|
||||||
|
s.RunTime = time.Now()
|
||||||
|
|
||||||
|
deps, err := NewDependencies(s.Suite.Path, maxDepth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.sharedPackageHashes.Add(s.Suite.Path)
|
||||||
|
for dep := range deps.Dependencies() {
|
||||||
|
s.sharedPackageHashes.Add(dep)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Dependencies = deps
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) Description() string {
|
||||||
|
numDeps := len(s.Dependencies.Dependencies())
|
||||||
|
pluralizer := "ies"
|
||||||
|
if numDeps == 1 {
|
||||||
|
pluralizer = "y"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// delta converts a package's modification recency into a non-negative
// score, attenuated by dependency depth (deeper dependencies matter less).
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
	// Negative dt means "modified before the last run" — clamp to 0.
	return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
}

// dt returns how long after the last run the package was modified.
// Positive values indicate the package changed since the suite last ran.
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
	// NOTE(review): Get may return nil if the path was never registered;
	// NewSuite/MarkAsRun... register all paths first, so this presumably
	// cannot happen — confirm before relying on it.
	packageHash := s.sharedPackageHashes.Get(packagePath)
	var modifiedTime time.Time
	if includeTests {
		modifiedTime = packageHash.TestModifiedTime
	} else {
		modifiedTime = packageHash.CodeModifiedTime
	}

	return modifiedTime.Sub(s.RunTime)
}
|
|
@ -0,0 +1,172 @@
|
||||||
|
package main

import (
	"flag"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
	"github.com/onsi/ginkgo/ginkgo/testrunner"
	"github.com/onsi/ginkgo/ginkgo/testsuite"
	"github.com/onsi/ginkgo/ginkgo/watch"
)

// BuildWatchCommand wires up the `ginkgo watch` subcommand: flag set,
// interrupt handling, desktop notifier, and the suite runner shared with
// the plain `run` command.
func BuildWatchCommand() *Command {
	commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
	interruptHandler := interrupthandler.NewInterruptHandler()
	notifier := NewNotifier(commandFlags)
	watcher := &SpecWatcher{
		commandFlags:     commandFlags,
		notifier:         notifier,
		interruptHandler: interruptHandler,
		suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
	}

	return &Command{
		Name:         "watch",
		FlagSet:      commandFlags.FlagSet,
		UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
		Usage: []string{
			"Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
			"Any arguments after -- will be passed to the test.",
		},
		Command:                   watcher.WatchSpecs,
		SuppressFlagDocumentation: true,
		FlagDocSubstitute: []string{
			"Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
		},
	}
}

// SpecWatcher implements the watch loop: it polls for changed packages and
// reruns the affected suites.
type SpecWatcher struct {
	commandFlags     *RunWatchAndBuildCommandFlags
	notifier         *Notifier
	interruptHandler *interrupthandler.InterruptHandler
	suiteRunner      *SuiteRunner
}

// WatchSpecs is the subcommand entry point: it finalizes flag-derived
// settings and enters the (blocking) watch loop.
func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
	w.commandFlags.computeNodes()
	w.notifier.VerifyNotificationsAreAvailable()

	w.WatchSuites(args, additionalArgs)
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
|
||||||
|
runners := []*testrunner.TestRunner{}
|
||||||
|
|
||||||
|
for _, suite := range suites {
|
||||||
|
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
|
||||||
|
}
|
||||||
|
|
||||||
|
return runners
|
||||||
|
}
|
||||||
|
|
||||||
|
// WatchSuites is the main watch loop. It discovers suites, sets up the
// delta tracker, optionally runs a single suite immediately, then polls
// once per second: newly discovered and modified suites are rerun until
// the user interrupts.
func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
	suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)

	if len(suites) == 0 {
		complainAndQuit("Found no test suites")
	}

	fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
	deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
	// First Delta call registers every suite; errors here are per-suite
	// dependency-resolution failures, reported but not fatal.
	delta, errors := deltaTracker.Delta(suites)

	fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
	for _, suite := range delta.NewSuites {
		fmt.Println("  " + suite.Description())
	}

	for suite, err := range errors {
		fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
	}

	// With exactly one suite, run it once up front before watching.
	if len(suites) == 1 {
		runners := w.runnersForSuites(suites, additionalArgs)
		w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
		runners[0].CleanUp()
	}

	ticker := time.NewTicker(time.Second)

	for {
		select {
		case <-ticker.C:
			// Re-discover suites each tick so newly created suites are
			// picked up; the shadowed names are intentional.
			suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
			delta, _ := deltaTracker.Delta(suites)

			suitesToRun := []testsuite.TestSuite{}

			if len(delta.NewSuites) > 0 {
				fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
				for _, suite := range delta.NewSuites {
					suitesToRun = append(suitesToRun, suite.Suite)
					fmt.Println("  " + suite.Description())
				}
			}

			modifiedSuites := delta.ModifiedSuites()
			if len(modifiedSuites) > 0 {
				fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle)
				for _, pkg := range delta.ModifiedPackages {
					fmt.Println("  " + pkg)
				}
				fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
				for _, suite := range modifiedSuites {
					suitesToRun = append(suitesToRun, suite.Suite)
					fmt.Println("  " + suite.Description())
				}
				fmt.Println("")
			}

			if len(suitesToRun) > 0 {
				w.UpdateSeed()
				w.ComputeSuccinctMode(len(suitesToRun))
				runners := w.runnersForSuites(suitesToRun, additionalArgs)
				// The callback marks each suite as run so the tracker's
				// change baseline advances.
				result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
					deltaTracker.WillRun(suite)
				})
				for _, runner := range runners {
					runner.CleanUp()
				}
				if !w.interruptHandler.WasInterrupted() {
					color := redColor
					if result.Passed {
						color = greenColor
					}
					fmt.Println(color + "\nDone. Resuming watch..." + defaultStyle)
				}
			}

		case <-w.interruptHandler.C:
			// Ctrl-C (or equivalent): exit the watch loop.
			return
		}
	}
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
|
||||||
|
if config.DefaultReporterConfig.Verbose {
|
||||||
|
config.DefaultReporterConfig.Succinct = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if w.commandFlags.wasSet("succinct") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if numSuites == 1 {
|
||||||
|
config.DefaultReporterConfig.Succinct = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if numSuites > 1 {
|
||||||
|
config.DefaultReporterConfig.Succinct = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *SpecWatcher) UpdateSeed() {
|
||||||
|
if !w.commandFlags.wasSet("seed") {
|
||||||
|
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,558 @@
|
||||||
|
/*
Ginkgo is a BDD-style testing framework for Golang

The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/

Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)

Ginkgo on Github: http://github.com/onsi/ginkgo

Ginkgo is MIT-Licensed
*/
package ginkgo

import (
	"flag"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/remote"
	"github.com/onsi/ginkgo/internal/suite"
	"github.com/onsi/ginkgo/internal/testingtproxy"
	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

// GINKGO_VERSION mirrors the version declared in the config package.
const GINKGO_VERSION = config.VERSION

// GINKGO_PANIC is the message carried by the panic that Fail/Skip raise to
// abort the current spec; GinkgoRecover rescues it in user goroutines.
const GINKGO_PANIC = `
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.

But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call

	defer GinkgoRecover()

at the top of the goroutine that caused this panic.
`

// defaultTimeout is the async-spec timeout in seconds used when the caller
// supplies none (see parseTimeout).
const defaultTimeout = 1

// Package-level singletons: the one suite all DSL calls register into, and
// the failer that records failures/panics for it.
var globalSuite *suite.Suite
var globalFailer *failer.Failer

func init() {
	// Register ginkgo's flags on the standard flag set and build the
	// global suite/failer/writer singletons the DSL functions use.
	config.Flags(flag.CommandLine, "ginkgo", true)
	GinkgoWriter = writer.New(os.Stdout)
	globalFailer = failer.New()
	globalSuite = suite.New(globalFailer)
}

//GinkgoWriter implements an io.Writer
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
//only if the current test fails.
var GinkgoWriter io.Writer

//The interface by which Ginkgo receives *testing.T
type GinkgoTestingT interface {
	Fail()
}

//GinkgoParallelNode returns the parallel node number for the current ginkgo process
//The node number is 1-indexed
func GinkgoParallelNode() int {
	return config.GinkgoConfig.ParallelNode
}
|
||||||
|
|
||||||
|
//Some matcher libraries or legacy codebases require a *testing.T
|
||||||
|
//GinkgoT implements an interface analogous to *testing.T and can be used if
|
||||||
|
//the library in question accepts *testing.T through an interface
|
||||||
|
//
|
||||||
|
// For example, with testify:
|
||||||
|
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
|
||||||
|
//
|
||||||
|
// Or with gomock:
|
||||||
|
// gomock.NewController(GinkgoT())
|
||||||
|
//
|
||||||
|
// GinkgoT() takes an optional offset argument that can be used to get the
|
||||||
|
// correct line number associated with the failure.
|
||||||
|
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
||||||
|
offset := 3
|
||||||
|
if len(optionalOffset) > 0 {
|
||||||
|
offset = optionalOffset[0]
|
||||||
|
}
|
||||||
|
return testingtproxy.New(GinkgoWriter, Fail, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
//The interface returned by GinkgoT(). This covers most of the methods
//in the testing package's T.
type GinkgoTInterface interface {
	Fail()
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	FailNow()
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Log(args ...interface{})
	Logf(format string, args ...interface{})
	Failed() bool
	Parallel()
	Skip(args ...interface{})
	Skipf(format string, args ...interface{})
	SkipNow()
	Skipped() bool
}

//Custom Ginkgo test reporters must implement the Reporter interface.
//
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
//and a SpecSummary just before a spec begins and just after a spec ends
type Reporter reporters.Reporter

//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
//to tell Ginkgo that your async test is done.
type Done chan<- interface{}

//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
//	FullTestText: a concatenation of ComponentTexts and the TestText
//	ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
//	TestText: the text in the actual It or Measure node
//	IsMeasurement: true if the current test is a measurement
//	FileName: the name of the file containing the current test
//	LineNumber: the line number for the current test
//	Failed: if the current test has failed, this will be true (useful in an AfterEach)
type GinkgoTestDescription struct {
	FullTestText   string
	ComponentTexts []string
	TestText       string

	IsMeasurement bool

	FileName   string
	LineNumber int

	Failed bool
}
|
||||||
|
|
||||||
|
//CurrentGinkgoTestDescripton returns information about the current running test.
|
||||||
|
func CurrentGinkgoTestDescription() GinkgoTestDescription {
|
||||||
|
summary, ok := globalSuite.CurrentRunningSpecSummary()
|
||||||
|
if !ok {
|
||||||
|
return GinkgoTestDescription{}
|
||||||
|
}
|
||||||
|
|
||||||
|
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
|
||||||
|
|
||||||
|
return GinkgoTestDescription{
|
||||||
|
ComponentTexts: summary.ComponentTexts[1:],
|
||||||
|
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
|
||||||
|
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
|
||||||
|
IsMeasurement: summary.IsMeasurement,
|
||||||
|
FileName: subjectCodeLocation.FileName,
|
||||||
|
LineNumber: subjectCodeLocation.LineNumber,
|
||||||
|
Failed: summary.HasFailureState(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Measurement tests receive a Benchmarker.
//
//You use the Time() function to time how long the passed in body function takes to run
//You use the RecordValue() function to track arbitrary numerical measurements.
//The optional info argument is passed to the test reporter and can be used to
//	provide the measurement data to a custom reporter with context.
//
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
type Benchmarker interface {
	Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
	RecordValue(name string, value float64, info ...interface{})
}

//RunSpecs is the entry point for the Ginkgo test runner.
//You must call this within a Golang testing TestX(t *testing.T) function.
//
//To bootstrap a test suite you can use the Ginkgo CLI:
//
//	ginkgo bootstrap
func RunSpecs(t GinkgoTestingT, description string) bool {
	specReporters := []Reporter{buildDefaultReporter()}
	return RunSpecsWithCustomReporters(t, description, specReporters)
}

//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
//RunSpecs() with this method.
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
	// Default reporter always goes first so its output frames the custom ones.
	specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
	return RunSpecsWithCustomReporters(t, description, specReporters)
}

//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
	// GinkgoWriter is always a *writer.Writer (set in init); the assertion
	// recovers the concrete type to toggle streaming.
	writer := GinkgoWriter.(*writer.Writer)
	writer.SetStream(config.DefaultReporterConfig.Verbose)
	reporters := make([]reporters.Reporter, len(specReporters))
	for i, reporter := range specReporters {
		reporters[i] = reporter
	}
	passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
	// A fully-green run that relied on focused specs exits with a special
	// code so CI can detect accidentally-committed focus.
	if passed && hasFocusedTests {
		fmt.Println("PASS | FOCUSED")
		os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
	}
	return passed
}
|
||||||
|
|
||||||
|
func buildDefaultReporter() Reporter {
|
||||||
|
remoteReportingServer := config.GinkgoConfig.StreamHost
|
||||||
|
if remoteReportingServer == "" {
|
||||||
|
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
|
||||||
|
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
|
||||||
|
} else {
|
||||||
|
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Skip notifies Ginkgo that the current spec should be skipped.
|
||||||
|
func Skip(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
globalFailer.Skip(message, codelocation.New(skip+1))
|
||||||
|
panic(GINKGO_PANIC)
|
||||||
|
}
|
||||||
|
|
||||||
|
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||||
|
func Fail(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
globalFailer.Fail(message, codelocation.New(skip+1))
|
||||||
|
panic(GINKGO_PANIC)
|
||||||
|
}
|
||||||
|
|
||||||
|
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||||
|
//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||||
|
//calls out to Gomega
|
||||||
|
//
|
||||||
|
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||||
|
//further assertions from running. This panic must be recovered. Ginkgo does this for you
|
||||||
|
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
|
||||||
|
//
|
||||||
|
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
|
||||||
|
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||||
|
func GinkgoRecover() {
|
||||||
|
e := recover()
|
||||||
|
if e != nil {
|
||||||
|
globalFailer.Panic(codelocation.New(1), e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
func Describe(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
	return true
}

//You can focus the tests within a describe block using FDescribe
func FDescribe(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using PDescribe
func PDescribe(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using XDescribe
func XDescribe(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//Context blocks allow you to organize your specs. A Context block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
func Context(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
	return true
}

//You can focus the tests within a describe block using FContext
func FContext(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using PContext
func PContext(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//You can mark the tests within a describe block as pending using XContext
func XContext(text string, body func()) bool {
	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
	return true
}

//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
//within an It block.
//
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
func It(text string, body interface{}, timeout ...float64) bool {
	globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//You can focus individual Its using FIt
func FIt(text string, body interface{}, timeout ...float64) bool {
	globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//You can mark Its as pending using PIt
func PIt(text string, _ ...interface{}) bool {
	// Pending specs never run: the body is replaced with a no-op.
	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//You can mark Its as pending using XIt
func XIt(text string, _ ...interface{}) bool {
	// Pending specs never run: the body is replaced with a no-op.
	globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//Specify blocks are aliases for It blocks and allow for more natural wording in situations
//where "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
//which apply to It blocks.
func Specify(text string, body interface{}, timeout ...float64) bool {
	return It(text, body, timeout...)
}

//You can focus individual Specifys using FSpecify
func FSpecify(text string, body interface{}, timeout ...float64) bool {
	return FIt(text, body, timeout...)
}

//You can mark Specifys as pending using PSpecify
func PSpecify(text string, is ...interface{}) bool {
	return PIt(text, is...)
}

//You can mark Specifys as pending using XSpecify
func XSpecify(text string, is ...interface{}) bool {
	return XIt(text, is...)
}
|
||||||
|
|
||||||
|
//By allows you to better document large Its.
|
||||||
|
//
|
||||||
|
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||||
|
//especially in the context of integration tests that capture a particular workflow.
|
||||||
|
//
|
||||||
|
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
|
||||||
|
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||||
|
func By(text string, callbacks ...func()) {
|
||||||
|
preamble := "\x1b[1mSTEP\x1b[0m"
|
||||||
|
if config.DefaultReporterConfig.NoColor {
|
||||||
|
preamble = "STEP"
|
||||||
|
}
|
||||||
|
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
|
||||||
|
if len(callbacks) == 1 {
|
||||||
|
callbacks[0]()
|
||||||
|
}
|
||||||
|
if len(callbacks) > 1 {
|
||||||
|
panic("just one callback per By, please")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
//and accumulate metrics provided to the Benchmarker by the body function.
//
//The body function must have the signature:
//	func(b Benchmarker)
func Measure(text string, body interface{}, samples int) bool {
	globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
	return true
}

//You can focus individual Measures using FMeasure
func FMeasure(text string, body interface{}, samples int) bool {
	globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
	return true
}

//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
	// Pending measurements never run: the body is replaced with a no-op.
	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
	// Pending measurements never run: the body is replaced with a no-op.
	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
	return true
}

//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
//parallel node process will call BeforeSuite.
//
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
//
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
func BeforeSuite(body interface{}, timeout ...float64) bool {
	globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}

//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
//
//When running in parallel, each parallel node process will call AfterSuite.
//
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
//
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
func AfterSuite(body interface{}, timeout ...float64) bool {
	globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
	return true
}
|
||||||
|
|
||||||
|
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
|
||||||
|
//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
|
||||||
|
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
|
||||||
|
//until that node is done before running.
|
||||||
|
//
|
||||||
|
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
|
||||||
|
//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
|
||||||
|
//to the second function (on all the other nodes).
|
||||||
|
//
|
||||||
|
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
|
||||||
|
//
|
||||||
|
// func() []byte
|
||||||
|
//
|
||||||
|
//or, to run asynchronously:
|
||||||
|
//
|
||||||
|
// func(done Done) []byte
|
||||||
|
//
|
||||||
|
//The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||||
|
//
|
||||||
|
// func(data []byte)
|
||||||
|
//
|
||||||
|
//or, to run asynchronously:
|
||||||
|
//
|
||||||
|
// func(data []byte, done Done)
|
||||||
|
//
|
||||||
|
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
|
||||||
|
//
|
||||||
|
// var dbClient db.Client
|
||||||
|
// var dbRunner db.Runner
|
||||||
|
//
|
||||||
|
// var _ = SynchronizedBeforeSuite(func() []byte {
|
||||||
|
// dbRunner = db.NewRunner()
|
||||||
|
// err := dbRunner.Start()
|
||||||
|
// Ω(err).ShouldNot(HaveOccurred())
|
||||||
|
// return []byte(dbRunner.URL)
|
||||||
|
// }, func(data []byte) {
|
||||||
|
// dbClient = db.NewClient()
|
||||||
|
// err := dbClient.Connect(string(data))
|
||||||
|
// Ω(err).ShouldNot(HaveOccurred())
|
||||||
|
// })
|
||||||
|
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.SetSynchronizedBeforeSuiteNode(
|
||||||
|
node1Body,
|
||||||
|
allNodesBody,
|
||||||
|
codelocation.New(1),
|
||||||
|
parseTimeout(timeout...),
|
||||||
|
)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
|
||||||
|
//external singleton resources shared across nodes when running tests in parallel.
|
||||||
|
//
|
||||||
|
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
|
||||||
|
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
|
||||||
|
//all other nodes are finished.
|
||||||
|
//
|
||||||
|
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
|
||||||
|
//
|
||||||
|
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
|
||||||
|
//only after all nodes have finished:
|
||||||
|
//
|
||||||
|
// var _ = SynchronizedAfterSuite(func() {
|
||||||
|
// dbClient.Cleanup()
|
||||||
|
// }, func() {
|
||||||
|
// dbRunner.Stop()
|
||||||
|
// })
|
||||||
|
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.SetSynchronizedAfterSuiteNode(
|
||||||
|
allNodesBody,
|
||||||
|
node1Body,
|
||||||
|
codelocation.New(1),
|
||||||
|
parseTimeout(timeout...),
|
||||||
|
)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
|
||||||
|
//Describe and Context blocks the outermost BeforeEach blocks are run first.
|
||||||
|
//
|
||||||
|
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func BeforeEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
|
||||||
|
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||||
|
//
|
||||||
|
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func JustBeforeEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
|
||||||
|
//Describe and Context blocks the innermost AfterEach blocks are run first.
|
||||||
|
//
|
||||||
|
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func AfterEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTimeout(timeout ...float64) time.Duration {
|
||||||
|
if len(timeout) == 0 {
|
||||||
|
return time.Duration(defaultTimeout * int64(time.Second))
|
||||||
|
} else {
|
||||||
|
return time.Duration(timeout[0] * float64(time.Second))
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1 @@
|
||||||
|
package integration
|
32
vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
Normal file
32
vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
package codelocation
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func New(skip int) types.CodeLocation {
|
||||||
|
_, file, line, _ := runtime.Caller(skip + 1)
|
||||||
|
stackTrace := PruneStack(string(debug.Stack()), skip)
|
||||||
|
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||||
|
}
|
||||||
|
|
||||||
|
//PruneStack trims a raw debug.Stack() dump: it drops the goroutine header and
//`skip` leading frames, then removes frames originating in ginkgo itself or in
//the Go runtime/testing packages. Stack entries are treated as pairs of lines
//(function line followed by its file:line line).
func PruneStack(fullStackTrace string, skip int) string {
	lines := strings.Split(fullStackTrace, "\n")
	// Two lines per entry: skip the header plus `skip` frames if present.
	if drop := 2 * (skip + 1); len(lines) > drop {
		lines = lines[drop:]
	}

	internal := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	kept := make([]string, 0, len(lines))
	for i := 0; i+1 < len(lines); i += 2 {
		if internal.MatchString(lines[i]) {
			continue
		}
		kept = append(kept, lines[i], lines[i+1])
	}
	return strings.Join(kept, "\n")
}
|
151
vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
Normal file
151
vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
Normal file
|
@ -0,0 +1,151 @@
|
||||||
|
package containernode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//subjectOrContainerNode is a tagged union of a child node: exactly one of the
//two fields is set.
type subjectOrContainerNode struct {
	containerNode *ContainerNode
	subjectNode   leafnodes.SubjectNode
}

//text returns the description of whichever child this union holds.
func (n subjectOrContainerNode) text() string {
	if n.containerNode != nil {
		return n.containerNode.Text()
	} else {
		return n.subjectNode.Text()
	}
}

//CollatedNodes pairs a subject node with the chain of containers enclosing it,
//ordered outermost first (see collate).
type CollatedNodes struct {
	Containers []*ContainerNode
	Subject    leafnodes.SubjectNode
}

//ContainerNode represents a Describe/Context block: its description, flag, and
//location, plus the setup nodes and child subjects/containers declared inside.
type ContainerNode struct {
	text         string
	flag         types.FlagType
	codeLocation types.CodeLocation

	setupNodes               []leafnodes.BasicNode
	subjectAndContainerNodes []subjectOrContainerNode
}

//New constructs an empty container with the given description, flag, and
//source location.
func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
	return &ContainerNode{
		text:         text,
		flag:         flag,
		codeLocation: codeLocation,
	}
}

//Shuffle permutes this container's direct children using r. The children are
//sorted by text first, so the result depends only on the random seed rather
//than on declaration order.
func (container *ContainerNode) Shuffle(r *rand.Rand) {
	sort.Sort(container)
	permutation := r.Perm(len(container.subjectAndContainerNodes))
	shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
	for i, j := range permutation {
		shuffledNodes[i] = container.subjectAndContainerNodes[j]
	}
	container.subjectAndContainerNodes = shuffledNodes
}

//BackPropagateProgrammaticFocus reports whether this subtree contains a
//focused leaf or container. When it does, this node's own Focused flag is
//cleared (focus on a descendant overrides focus on the ancestor). Pending
//subtrees are skipped entirely.
func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
	if node.flag == types.FlagTypePending {
		return false
	}

	shouldUnfocus := false
	for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
		if subjectOrContainerNode.containerNode != nil {
			shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
		} else {
			shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
		}
	}

	if shouldUnfocus {
		if node.flag == types.FlagTypeFocused {
			node.flag = types.FlagTypeNone
		}
		return true
	}

	return node.flag == types.FlagTypeFocused
}

//Collate flattens the tree into one (containers, subject) pair per subject,
//in the current child order.
func (node *ContainerNode) Collate() []CollatedNodes {
	return node.collate([]*ContainerNode{})
}

//collate recursively walks the tree, accumulating the enclosing container
//chain. The chain is copied at each level so siblings never share a slice.
func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
	collated := make([]CollatedNodes, 0)

	containers := make([]*ContainerNode, len(enclosingContainers))
	copy(containers, enclosingContainers)
	containers = append(containers, node)

	for _, subjectOrContainer := range node.subjectAndContainerNodes {
		if subjectOrContainer.containerNode != nil {
			collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
		} else {
			collated = append(collated, CollatedNodes{
				Containers: containers,
				Subject:    subjectOrContainer.subjectNode,
			})
		}
	}

	return collated
}

//PushContainerNode appends a child container.
func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
}

//PushSubjectNode appends a child subject (It/Measure).
func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
}

//PushSetupNode appends a setup node (BeforeEach, AfterEach, etc.).
func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
	node.setupNodes = append(node.setupNodes, setupNode)
}

//SetupNodesOfType returns this container's setup nodes of the given type, in
//declaration order.
func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
	nodes := []leafnodes.BasicNode{}
	for _, setupNode := range node.setupNodes {
		if setupNode.Type() == nodeType {
			nodes = append(nodes, setupNode)
		}
	}
	return nodes
}

//Text returns the container's description.
func (node *ContainerNode) Text() string {
	return node.text
}

//CodeLocation returns where the container was declared.
func (node *ContainerNode) CodeLocation() types.CodeLocation {
	return node.codeLocation
}

//Flag returns the container's focus/pending flag.
func (node *ContainerNode) Flag() types.FlagType {
	return node.flag
}

//sort.Interface — orders direct children by their text (used by Shuffle).

func (node *ContainerNode) Len() int {
	return len(node.subjectAndContainerNodes)
}

func (node *ContainerNode) Less(i, j int) bool {
	return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
}

func (node *ContainerNode) Swap(i, j int) {
	node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
}
|
|
@ -0,0 +1,92 @@
|
||||||
|
package failer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//Failer collects the first failure reported while a spec component runs.
//All methods take the lock; once a failure is recorded, subsequent reports
//are ignored until Drain resets the state to passed.
type Failer struct {
	lock    *sync.Mutex
	failure types.SpecFailure
	state   types.SpecState
}

//New returns a Failer in the passed state with no recorded failure.
func New() *Failer {
	return &Failer{
		lock:  &sync.Mutex{},
		state: types.SpecStatePassed,
	}
}

//Panic records a panic (with the recovered value) as the outcome, unless a
//failure was already recorded.
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStatePanicked
		f.failure = types.SpecFailure{
			Message:        "Test Panicked",
			Location:       location,
			ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
		}
	}
}

//Timeout records a timeout as the outcome, unless a failure was already
//recorded.
func (f *Failer) Timeout(location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateTimedOut
		f.failure = types.SpecFailure{
			Message:  "Timed out",
			Location: location,
		}
	}
}

//Fail records an explicit failure with the given message, unless a failure
//was already recorded.
func (f *Failer) Fail(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateFailed
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}

//Drain returns the accumulated failure and state, stamping the failure with
//the component that was running, and then resets the Failer to the passed
//state ready for the next component.
func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
	f.lock.Lock()
	defer f.lock.Unlock()

	failure := f.failure
	outcome := f.state
	if outcome != types.SpecStatePassed {
		failure.ComponentType = componentType
		failure.ComponentIndex = componentIndex
		failure.ComponentCodeLocation = componentCodeLocation
	}

	f.state = types.SpecStatePassed
	f.failure = types.SpecFailure{}

	return failure, outcome
}

//Skip records a skip with the given message, unless a failure was already
//recorded.
func (f *Failer) Skip(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateSkipped
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}
|
|
@ -0,0 +1,95 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type benchmarker struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
measurements map[string]*types.SpecMeasurement
|
||||||
|
orderCounter int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBenchmarker() *benchmarker {
|
||||||
|
return &benchmarker{
|
||||||
|
measurements: make(map[string]*types.SpecMeasurement, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
|
||||||
|
t := time.Now()
|
||||||
|
body()
|
||||||
|
elapsedTime = time.Since(t)
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
|
||||||
|
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
|
||||||
|
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
measurement.Results = append(measurement.Results, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
|
||||||
|
measurement, ok := b.measurements[name]
|
||||||
|
if !ok {
|
||||||
|
var computedInfo interface{}
|
||||||
|
computedInfo = nil
|
||||||
|
if len(info) > 0 {
|
||||||
|
computedInfo = info[0]
|
||||||
|
}
|
||||||
|
measurement = &types.SpecMeasurement{
|
||||||
|
Name: name,
|
||||||
|
Info: computedInfo,
|
||||||
|
Order: b.orderCounter,
|
||||||
|
SmallestLabel: smallestLabel,
|
||||||
|
LargestLabel: largestLabel,
|
||||||
|
AverageLabel: averageLabel,
|
||||||
|
Units: units,
|
||||||
|
Results: make([]float64, 0),
|
||||||
|
}
|
||||||
|
b.measurements[name] = measurement
|
||||||
|
b.orderCounter++
|
||||||
|
}
|
||||||
|
|
||||||
|
return measurement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
for _, measurement := range b.measurements {
|
||||||
|
measurement.Smallest = math.MaxFloat64
|
||||||
|
measurement.Largest = -math.MaxFloat64
|
||||||
|
sum := float64(0)
|
||||||
|
sumOfSquares := float64(0)
|
||||||
|
|
||||||
|
for _, result := range measurement.Results {
|
||||||
|
if result > measurement.Largest {
|
||||||
|
measurement.Largest = result
|
||||||
|
}
|
||||||
|
if result < measurement.Smallest {
|
||||||
|
measurement.Smallest = result
|
||||||
|
}
|
||||||
|
sum += result
|
||||||
|
sumOfSquares += result * result
|
||||||
|
}
|
||||||
|
|
||||||
|
n := float64(len(measurement.Results))
|
||||||
|
measurement.Average = sum / n
|
||||||
|
measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.measurements
|
||||||
|
}
|
|
@ -0,0 +1,19 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//BasicNode is the minimal interface shared by every leaf node in a spec tree
//(setup nodes as well as subjects).
type BasicNode interface {
	Type() types.SpecComponentType
	Run() (types.SpecState, types.SpecFailure)
	CodeLocation() types.CodeLocation
}

//SubjectNode is a runnable leaf that forms the subject of a spec (an It or a
//Measure). Beyond BasicNode it exposes its description, focus/pending flag,
//and how many times it should be sampled (always 1 for It nodes).
type SubjectNode interface {
	BasicNode

	Text() string
	Flag() types.FlagType
	Samples() int
}
|
|
@ -0,0 +1,46 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ItNode struct {
|
||||||
|
runner *runner
|
||||||
|
|
||||||
|
flag types.FlagType
|
||||||
|
text string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
|
||||||
|
return &ItNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
|
||||||
|
flag: flag,
|
||||||
|
text: text,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
return node.runner.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Type() types.SpecComponentType {
|
||||||
|
return types.SpecComponentTypeIt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Text() string {
|
||||||
|
return node.text
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Flag() types.FlagType {
|
||||||
|
return node.flag
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.runner.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Samples() int {
|
||||||
|
return 1
|
||||||
|
}
|
|
@ -0,0 +1,61 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MeasureNode struct {
|
||||||
|
runner *runner
|
||||||
|
|
||||||
|
text string
|
||||||
|
flag types.FlagType
|
||||||
|
samples int
|
||||||
|
benchmarker *benchmarker
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
|
||||||
|
benchmarker := newBenchmarker()
|
||||||
|
|
||||||
|
wrappedBody := func() {
|
||||||
|
reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MeasureNode{
|
||||||
|
runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
|
||||||
|
|
||||||
|
text: text,
|
||||||
|
flag: flag,
|
||||||
|
samples: samples,
|
||||||
|
benchmarker: benchmarker,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
return node.runner.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
|
||||||
|
return node.benchmarker.measurementsReport()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Type() types.SpecComponentType {
|
||||||
|
return types.SpecComponentTypeMeasure
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Text() string {
|
||||||
|
return node.text
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Flag() types.FlagType {
|
||||||
|
return node.flag
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.runner.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Samples() int {
|
||||||
|
return node.samples
|
||||||
|
}
|
|
@ -0,0 +1,113 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/onsi/ginkgo/internal/codelocation"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
//runner invokes a user-supplied node body, either synchronously or
//asynchronously (Done-channel style), reporting panics and timeouts to the
//shared Failer. Exactly one of syncFunc/asyncFunc is set, per isAsync.
type runner struct {
	isAsync          bool
	asyncFunc        func(chan<- interface{})
	syncFunc         func()
	codeLocation     types.CodeLocation
	timeoutThreshold time.Duration
	nodeType         types.SpecComponentType
	componentIndex   int
	failer           *failer.Failer
}

//newRunner validates body's signature and wraps it. A zero-argument function
//runs synchronously; a one-argument function whose parameter is a channel of
//interface{} (the Done channel) runs asynchronously. Anything else panics.
func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
	bodyType := reflect.TypeOf(body)
	if bodyType.Kind() != reflect.Func {
		panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
	}

	runner := &runner{
		codeLocation:     codeLocation,
		timeoutThreshold: timeout,
		failer:           failer,
		nodeType:         nodeType,
		componentIndex:   componentIndex,
	}

	switch bodyType.NumIn() {
	case 0:
		runner.syncFunc = body.(func())
		return runner
	case 1:
		// The sole argument must be a channel of interface{} (a Done channel).
		if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
			panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
		}

		// Call through reflection so any concrete channel-typed parameter is accepted.
		wrappedBody := func(done chan<- interface{}) {
			bodyValue := reflect.ValueOf(body)
			bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
		}

		runner.isAsync = true
		runner.asyncFunc = wrappedBody
		return runner
	}

	panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}

//run dispatches to the async or sync execution path.
func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
	if r.isAsync {
		return r.runAsync()
	} else {
		return r.runSync()
	}
}

//runAsync runs the body in a goroutine and waits for it to signal done,
//reporting a timeout to the failer if timeoutThreshold elapses first.
func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
	done := make(chan interface{}, 1)

	go func() {
		finished := false

		// If the body panics — or exits without reaching the end (recover()
		// is nil but finished is still false; presumably covers aborts like
		// runtime.Goexit) — report it as a panic and make sure the waiting
		// select below is unblocked without double-closing done.
		defer func() {
			if e := recover(); e != nil || !finished {
				r.failer.Panic(codelocation.New(2), e)
				select {
				case <-done:
					break
				default:
					close(done)
				}
			}
		}()

		r.asyncFunc(done)
		finished = true
	}()

	select {
	case <-done:
	case <-time.After(r.timeoutThreshold):
		r.failer.Timeout(r.codeLocation)
	}

	// Drain also resets the failer for the next component.
	failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	return
}

//runSync runs the body inline; the deferred handler converts panics (or an
//abnormal exit where finished is still false) into a failer report, then
//drains the failer into the named results.
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
	finished := false

	defer func() {
		if e := recover(); e != nil || !finished {
			r.failer.Panic(codelocation.New(2), e)
		}

		failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	}()

	r.syncFunc()
	finished = true

	return
}
|
|
@ -0,0 +1,41 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SetupNode struct {
|
||||||
|
runner *runner
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
return node.runner.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *SetupNode) Type() types.SpecComponentType {
|
||||||
|
return node.runner.nodeType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *SetupNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.runner.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,54 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SuiteNode interface {
|
||||||
|
Run(parallelNode int, parallelTotal int, syncHost string) bool
|
||||||
|
Passed() bool
|
||||||
|
Summary() *types.SetupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
type simpleSuiteNode struct {
|
||||||
|
runner *runner
|
||||||
|
outcome types.SpecState
|
||||||
|
failure types.SpecFailure
|
||||||
|
runTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||||
|
t := time.Now()
|
||||||
|
node.outcome, node.failure = node.runner.run()
|
||||||
|
node.runTime = time.Since(t)
|
||||||
|
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *simpleSuiteNode) Passed() bool {
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *simpleSuiteNode) Summary() *types.SetupSummary {
|
||||||
|
return &types.SetupSummary{
|
||||||
|
ComponentType: node.runner.nodeType,
|
||||||
|
CodeLocation: node.runner.codeLocation,
|
||||||
|
State: node.outcome,
|
||||||
|
RunTime: node.runTime,
|
||||||
|
Failure: node.failure,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
return &simpleSuiteNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
return &simpleSuiteNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||||
|
}
|
||||||
|
}
|
89
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
Normal file
89
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type synchronizedAfterSuiteNode struct {
|
||||||
|
runnerA *runner
|
||||||
|
runnerB *runner
|
||||||
|
|
||||||
|
outcome types.SpecState
|
||||||
|
failure types.SpecFailure
|
||||||
|
runTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
return &synchronizedAfterSuiteNode{
|
||||||
|
runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||||
|
runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||||
|
node.outcome, node.failure = node.runnerA.run()
|
||||||
|
|
||||||
|
if parallelNode == 1 {
|
||||||
|
if parallelTotal > 1 {
|
||||||
|
node.waitUntilOtherNodesAreDone(syncHost)
|
||||||
|
}
|
||||||
|
|
||||||
|
outcome, failure := node.runnerB.run()
|
||||||
|
|
||||||
|
if node.outcome == types.SpecStatePassed {
|
||||||
|
node.outcome, node.failure = outcome, failure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) Passed() bool {
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
|
||||||
|
return &types.SetupSummary{
|
||||||
|
ComponentType: node.runnerA.nodeType,
|
||||||
|
CodeLocation: node.runnerA.codeLocation,
|
||||||
|
State: node.outcome,
|
||||||
|
RunTime: node.runTime,
|
||||||
|
Failure: node.failure,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
|
||||||
|
for {
|
||||||
|
if node.canRun(syncHost) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
|
||||||
|
resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
afterSuiteData := types.RemoteAfterSuiteData{}
|
||||||
|
err = json.Unmarshal(body, &afterSuiteData)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return afterSuiteData.CanRun
|
||||||
|
}
|
182
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
Normal file
182
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
Normal file
|
@ -0,0 +1,182 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type synchronizedBeforeSuiteNode struct {
|
||||||
|
runnerA *runner
|
||||||
|
runnerB *runner
|
||||||
|
|
||||||
|
data []byte
|
||||||
|
|
||||||
|
outcome types.SpecState
|
||||||
|
failure types.SpecFailure
|
||||||
|
runTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
node := &synchronizedBeforeSuiteNode{}
|
||||||
|
|
||||||
|
node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||||
|
node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||||
|
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||||
|
t := time.Now()
|
||||||
|
defer func() {
|
||||||
|
node.runTime = time.Since(t)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if parallelNode == 1 {
|
||||||
|
node.outcome, node.failure = node.runA(parallelTotal, syncHost)
|
||||||
|
} else {
|
||||||
|
node.outcome, node.failure = node.waitForA(syncHost)
|
||||||
|
}
|
||||||
|
|
||||||
|
if node.outcome != types.SpecStatePassed {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
node.outcome, node.failure = node.runnerB.run()
|
||||||
|
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
|
||||||
|
outcome, failure := node.runnerA.run()
|
||||||
|
|
||||||
|
if parallelTotal > 1 {
|
||||||
|
state := types.RemoteBeforeSuiteStatePassed
|
||||||
|
if outcome != types.SpecStatePassed {
|
||||||
|
state = types.RemoteBeforeSuiteStateFailed
|
||||||
|
}
|
||||||
|
json := (types.RemoteBeforeSuiteData{
|
||||||
|
Data: node.data,
|
||||||
|
State: state,
|
||||||
|
}).ToJSON()
|
||||||
|
http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
|
||||||
|
}
|
||||||
|
|
||||||
|
return outcome, failure
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
|
||||||
|
failure := func(message string) types.SpecFailure {
|
||||||
|
return types.SpecFailure{
|
||||||
|
Message: message,
|
||||||
|
Location: node.runnerA.codeLocation,
|
||||||
|
ComponentType: node.runnerA.nodeType,
|
||||||
|
ComponentIndex: node.runnerA.componentIndex,
|
||||||
|
ComponentCodeLocation: node.runnerA.codeLocation,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
resp, err := http.Get(syncHost + "/BeforeSuiteState")
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
beforeSuiteData := types.RemoteBeforeSuiteData{}
|
||||||
|
err = json.Unmarshal(body, &beforeSuiteData)
|
||||||
|
if err != nil {
|
||||||
|
return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch beforeSuiteData.State {
|
||||||
|
case types.RemoteBeforeSuiteStatePassed:
|
||||||
|
node.data = beforeSuiteData.Data
|
||||||
|
return types.SpecStatePassed, types.SpecFailure{}
|
||||||
|
case types.RemoteBeforeSuiteStateFailed:
|
||||||
|
return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
|
||||||
|
case types.RemoteBeforeSuiteStateDisappeared:
|
||||||
|
return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
return types.SpecStateFailed, failure("Shouldn't get here!")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) Passed() bool {
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
|
||||||
|
return &types.SetupSummary{
|
||||||
|
ComponentType: node.runnerA.nodeType,
|
||||||
|
CodeLocation: node.runnerA.codeLocation,
|
||||||
|
State: node.outcome,
|
||||||
|
RunTime: node.runTime,
|
||||||
|
Failure: node.failure,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
|
||||||
|
typeA := reflect.TypeOf(bodyA)
|
||||||
|
if typeA.Kind() != reflect.Func {
|
||||||
|
panic("SynchronizedBeforeSuite expects a function as its first argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
takesNothing := typeA.NumIn() == 0
|
||||||
|
takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
|
||||||
|
returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
|
||||||
|
|
||||||
|
if !((takesNothing || takesADoneChannel) && returnsBytes) {
|
||||||
|
panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if takesADoneChannel {
|
||||||
|
return func(done chan<- interface{}) {
|
||||||
|
out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
|
||||||
|
node.data = out[0].Interface().([]byte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
|
||||||
|
node.data = out[0].Interface().([]byte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
|
||||||
|
typeB := reflect.TypeOf(bodyB)
|
||||||
|
if typeB.Kind() != reflect.Func {
|
||||||
|
panic("SynchronizedBeforeSuite expects a function as its second argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
returnsNothing := typeB.NumOut() == 0
|
||||||
|
takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
|
||||||
|
takesBytesAndDone := typeB.NumIn() == 2 &&
|
||||||
|
typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
|
||||||
|
typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
|
||||||
|
|
||||||
|
if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
|
||||||
|
panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
|
||||||
|
}
|
||||||
|
|
||||||
|
if takesBytesAndDone {
|
||||||
|
return func(done chan<- interface{}) {
|
||||||
|
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,250 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
|
||||||
|
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
|
||||||
|
|
||||||
|
ginkgo -nodes=N
|
||||||
|
|
||||||
|
where N is the number of nodes you desire.
|
||||||
|
*/
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type configAndSuite struct {
|
||||||
|
config config.GinkgoConfigType
|
||||||
|
summary *types.SuiteSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
type Aggregator struct {
|
||||||
|
nodeCount int
|
||||||
|
config config.DefaultReporterConfigType
|
||||||
|
stenographer stenographer.Stenographer
|
||||||
|
result chan bool
|
||||||
|
|
||||||
|
suiteBeginnings chan configAndSuite
|
||||||
|
aggregatedSuiteBeginnings []configAndSuite
|
||||||
|
|
||||||
|
beforeSuites chan *types.SetupSummary
|
||||||
|
aggregatedBeforeSuites []*types.SetupSummary
|
||||||
|
|
||||||
|
afterSuites chan *types.SetupSummary
|
||||||
|
aggregatedAfterSuites []*types.SetupSummary
|
||||||
|
|
||||||
|
specCompletions chan *types.SpecSummary
|
||||||
|
completedSpecs []*types.SpecSummary
|
||||||
|
|
||||||
|
suiteEndings chan *types.SuiteSummary
|
||||||
|
aggregatedSuiteEndings []*types.SuiteSummary
|
||||||
|
specs []*types.SpecSummary
|
||||||
|
|
||||||
|
startTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
|
||||||
|
aggregator := &Aggregator{
|
||||||
|
nodeCount: nodeCount,
|
||||||
|
result: result,
|
||||||
|
config: config,
|
||||||
|
stenographer: stenographer,
|
||||||
|
|
||||||
|
suiteBeginnings: make(chan configAndSuite, 0),
|
||||||
|
beforeSuites: make(chan *types.SetupSummary, 0),
|
||||||
|
afterSuites: make(chan *types.SetupSummary, 0),
|
||||||
|
specCompletions: make(chan *types.SpecSummary, 0),
|
||||||
|
suiteEndings: make(chan *types.SuiteSummary, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
go aggregator.mux()
|
||||||
|
|
||||||
|
return aggregator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
aggregator.suiteBeginnings <- configAndSuite{config, summary}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.beforeSuites <- setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.afterSuites <- setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
//noop
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
aggregator.specCompletions <- specSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
aggregator.suiteEndings <- summary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) mux() {
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case configAndSuite := <-aggregator.suiteBeginnings:
|
||||||
|
aggregator.registerSuiteBeginning(configAndSuite)
|
||||||
|
case setupSummary := <-aggregator.beforeSuites:
|
||||||
|
aggregator.registerBeforeSuite(setupSummary)
|
||||||
|
case setupSummary := <-aggregator.afterSuites:
|
||||||
|
aggregator.registerAfterSuite(setupSummary)
|
||||||
|
case specSummary := <-aggregator.specCompletions:
|
||||||
|
aggregator.registerSpecCompletion(specSummary)
|
||||||
|
case suite := <-aggregator.suiteEndings:
|
||||||
|
finished, passed := aggregator.registerSuiteEnding(suite)
|
||||||
|
if finished {
|
||||||
|
aggregator.result <- passed
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
|
||||||
|
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
|
||||||
|
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) == 1 {
|
||||||
|
aggregator.startTime = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
|
||||||
|
|
||||||
|
numberOfSpecsToRun := 0
|
||||||
|
totalNumberOfSpecs := 0
|
||||||
|
for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
|
||||||
|
numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
|
||||||
|
totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
|
||||||
|
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
|
||||||
|
aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
|
||||||
|
aggregator.specs = append(aggregator.specs, specSummary)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) flushCompletedSpecs() {
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, setupSummary := range aggregator.aggregatedBeforeSuites {
|
||||||
|
aggregator.announceBeforeSuite(setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, specSummary := range aggregator.completedSpecs {
|
||||||
|
aggregator.announceSpec(specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, setupSummary := range aggregator.aggregatedAfterSuites {
|
||||||
|
aggregator.announceAfterSuite(setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
|
||||||
|
aggregator.completedSpecs = []*types.SpecSummary{}
|
||||||
|
aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
|
||||||
|
if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||||
|
aggregator.stenographer.AnnounceSpecWillRun(specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||||
|
|
||||||
|
switch specSummary.State {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
if specSummary.IsMeasurement {
|
||||||
|
aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
|
||||||
|
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
|
||||||
|
aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
|
||||||
|
} else {
|
||||||
|
aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
case types.SpecStatePending:
|
||||||
|
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
case types.SpecStateTimedOut:
|
||||||
|
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
|
||||||
|
aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
|
||||||
|
if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregatedSuiteSummary := &types.SuiteSummary{}
|
||||||
|
aggregatedSuiteSummary.SuiteSucceeded = true
|
||||||
|
|
||||||
|
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
|
||||||
|
if suiteSummary.SuiteSucceeded == false {
|
||||||
|
aggregatedSuiteSummary.SuiteSucceeded = false
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
|
||||||
|
aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
|
||||||
|
|
||||||
|
aggregator.stenographer.SummarizeFailures(aggregator.specs)
|
||||||
|
aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
|
||||||
|
|
||||||
|
return true, aggregatedSuiteSummary.SuiteSucceeded
|
||||||
|
}
|
90
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
90
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//An interface to net/http's client to allow the injection of fakes under test
|
||||||
|
type Poster interface {
|
||||||
|
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
The ForwardingReporter is a Ginkgo reporter that forwards information to
|
||||||
|
a Ginkgo remote server.
|
||||||
|
|
||||||
|
When streaming parallel test output, this repoter is automatically installed by Ginkgo.
|
||||||
|
|
||||||
|
This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
|
||||||
|
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
|
||||||
|
in place of Ginkgo's DefaultReporter.
|
||||||
|
*/
|
||||||
|
|
||||||
|
type ForwardingReporter struct {
|
||||||
|
serverHost string
|
||||||
|
poster Poster
|
||||||
|
outputInterceptor OutputInterceptor
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
|
||||||
|
return &ForwardingReporter{
|
||||||
|
serverHost: serverHost,
|
||||||
|
poster: poster,
|
||||||
|
outputInterceptor: outputInterceptor,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||||
|
encoded, _ := json.Marshal(data)
|
||||||
|
buffer := bytes.NewBuffer(encoded)
|
||||||
|
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
data := struct {
|
||||||
|
Config config.GinkgoConfigType `json:"config"`
|
||||||
|
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||||
|
}{
|
||||||
|
conf,
|
||||||
|
summary,
|
||||||
|
}
|
||||||
|
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
reporter.post("/SpecSuiteWillBegin", data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
setupSummary.CapturedOutput = output
|
||||||
|
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
reporter.post("/SpecWillRun", specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
specSummary.CapturedOutput = output
|
||||||
|
reporter.post("/SpecDidComplete", specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
setupSummary.CapturedOutput = output
|
||||||
|
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.post("/SpecSuiteDidEnd", summary)
|
||||||
|
}
|
10
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
Normal file
10
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
package remote
|
||||||
|
|
||||||
|
/*
|
||||||
|
The OutputInterceptor is used by the ForwardingReporter to
|
||||||
|
intercept and capture all stdin and stderr output during a test run.
|
||||||
|
*/
|
||||||
|
type OutputInterceptor interface {
|
||||||
|
StartInterceptingOutput() error
|
||||||
|
StopInterceptingAndReturnOutput() (string, error)
|
||||||
|
}
|
55
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
Normal file
55
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin linux
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewOutputInterceptor() OutputInterceptor {
|
||||||
|
return &outputInterceptor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type outputInterceptor struct {
|
||||||
|
redirectFile *os.File
|
||||||
|
intercepting bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
return errors.New("Already intercepting output!")
|
||||||
|
}
|
||||||
|
interceptor.intercepting = true
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call a function in ./syscall_dup_*.go
|
||||||
|
// If building for everything other than linux_arm64,
|
||||||
|
// use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
|
||||||
|
// call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
|
||||||
|
syscallDup(int(interceptor.redirectFile.Fd()), 1)
|
||||||
|
syscallDup(int(interceptor.redirectFile.Fd()), 2)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||||
|
if !interceptor.intercepting {
|
||||||
|
return "", errors.New("Not intercepting output!")
|
||||||
|
}
|
||||||
|
|
||||||
|
interceptor.redirectFile.Close()
|
||||||
|
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
|
||||||
|
os.Remove(interceptor.redirectFile.Name())
|
||||||
|
|
||||||
|
interceptor.intercepting = false
|
||||||
|
|
||||||
|
return string(output), err
|
||||||
|
}
|
33
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
Normal file
33
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewOutputInterceptor() OutputInterceptor {
|
||||||
|
return &outputInterceptor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type outputInterceptor struct {
|
||||||
|
intercepting bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
return errors.New("Already intercepting output!")
|
||||||
|
}
|
||||||
|
interceptor.intercepting = true
|
||||||
|
|
||||||
|
// not working on windows...
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||||
|
// not working on windows...
|
||||||
|
interceptor.intercepting = false
|
||||||
|
|
||||||
|
return "", nil
|
||||||
|
}
|
|
@ -0,0 +1,204 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||||
|
This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||||
|
It then forwards that communication to attached reporters.
|
||||||
|
*/
|
||||||
|
type Server struct {
|
||||||
|
listener net.Listener
|
||||||
|
reporters []reporters.Reporter
|
||||||
|
alives []func() bool
|
||||||
|
lock *sync.Mutex
|
||||||
|
beforeSuiteData types.RemoteBeforeSuiteData
|
||||||
|
parallelTotal int
|
||||||
|
}
|
||||||
|
|
||||||
|
//Create a new server, automatically selecting a port
|
||||||
|
func NewServer(parallelTotal int) (*Server, error) {
|
||||||
|
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Server{
|
||||||
|
listener: listener,
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
alives: make([]func() bool, parallelTotal),
|
||||||
|
beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
|
||||||
|
parallelTotal: parallelTotal,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||||
|
func (server *Server) Start() {
|
||||||
|
httpServer := &http.Server{}
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
httpServer.Handler = mux
|
||||||
|
|
||||||
|
//streaming endpoints
|
||||||
|
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
|
||||||
|
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
|
||||||
|
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
|
||||||
|
mux.HandleFunc("/SpecWillRun", server.specWillRun)
|
||||||
|
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
|
||||||
|
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
|
||||||
|
|
||||||
|
//synchronization endpoints
|
||||||
|
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
|
||||||
|
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
|
||||||
|
|
||||||
|
go httpServer.Serve(server.listener)
|
||||||
|
}
|
||||||
|
|
||||||
|
//Stop the server
|
||||||
|
func (server *Server) Close() {
|
||||||
|
server.listener.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
//The address the server can be reached it. Pass this into the `ForwardingReporter`.
|
||||||
|
func (server *Server) Address() string {
|
||||||
|
return "http://" + server.listener.Addr().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Streaming Endpoints
|
||||||
|
//
|
||||||
|
|
||||||
|
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||||
|
func (server *Server) readAll(request *http.Request) []byte {
|
||||||
|
defer request.Body.Close()
|
||||||
|
body, _ := ioutil.ReadAll(request.Body)
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
|
||||||
|
server.reporters = reporters
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
|
||||||
|
var data struct {
|
||||||
|
Config config.GinkgoConfigType `json:"config"`
|
||||||
|
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||||
|
}
|
||||||
|
|
||||||
|
json.Unmarshal(body, &data)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var setupSummary *types.SetupSummary
|
||||||
|
json.Unmarshal(body, &setupSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.BeforeSuiteDidRun(setupSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var setupSummary *types.SetupSummary
|
||||||
|
json.Unmarshal(body, &setupSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.AfterSuiteDidRun(setupSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var specSummary *types.SpecSummary
|
||||||
|
json.Unmarshal(body, &specSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecWillRun(specSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var specSummary *types.SpecSummary
|
||||||
|
json.Unmarshal(body, &specSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecDidComplete(specSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var suiteSummary *types.SuiteSummary
|
||||||
|
json.Unmarshal(body, &suiteSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Synchronization Endpoints
|
||||||
|
//
|
||||||
|
|
||||||
|
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||||
|
server.lock.Lock()
|
||||||
|
defer server.lock.Unlock()
|
||||||
|
server.alives[node-1] = alive
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) nodeIsAlive(node int) bool {
|
||||||
|
server.lock.Lock()
|
||||||
|
defer server.lock.Unlock()
|
||||||
|
alive := server.alives[node-1]
|
||||||
|
if alive == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return alive()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
if request.Method == "POST" {
|
||||||
|
dec := json.NewDecoder(request.Body)
|
||||||
|
dec.Decode(&(server.beforeSuiteData))
|
||||||
|
} else {
|
||||||
|
beforeSuiteData := server.beforeSuiteData
|
||||||
|
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||||
|
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||||
|
}
|
||||||
|
enc := json.NewEncoder(writer)
|
||||||
|
enc.Encode(beforeSuiteData)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
afterSuiteData := types.RemoteAfterSuiteData{
|
||||||
|
CanRun: true,
|
||||||
|
}
|
||||||
|
for i := 2; i <= server.parallelTotal; i++ {
|
||||||
|
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
enc := json.NewEncoder(writer)
|
||||||
|
enc.Encode(afterSuiteData)
|
||||||
|
}
|
11
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
generated
vendored
Normal file
11
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
// +build linux,arm64
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
|
||||||
|
// use the nearly identical syscall.Dup3 instead
|
||||||
|
func syscallDup(oldfd int, newfd int) (err error) {
|
||||||
|
return syscall.Dup3(oldfd, newfd, 0)
|
||||||
|
}
|
|
@ -0,0 +1,10 @@
|
||||||
|
// +build !linux !arm64
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
func syscallDup(oldfd int, newfd int) (err error) {
|
||||||
|
return syscall.Dup2(oldfd, newfd)
|
||||||
|
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
package spec
|
||||||
|
|
||||||
|
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
|
||||||
|
if length == 0 {
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have more nodes than tests. Trivial case.
|
||||||
|
if parallelTotal >= length {
|
||||||
|
if parallelNode > length {
|
||||||
|
return 0, 0
|
||||||
|
} else {
|
||||||
|
return parallelNode - 1, 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is the minimum amount of tests that a node will be required to run
|
||||||
|
minTestsPerNode := length / parallelTotal
|
||||||
|
|
||||||
|
// This is the maximum amount of tests that a node will be required to run
|
||||||
|
// The algorithm guarantees that this would be equal to at least the minimum amount
|
||||||
|
// and at most one more
|
||||||
|
maxTestsPerNode := minTestsPerNode
|
||||||
|
if length%parallelTotal != 0 {
|
||||||
|
maxTestsPerNode++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Number of nodes that will have to run the maximum amount of tests per node
|
||||||
|
numMaxLoadNodes := length % parallelTotal
|
||||||
|
|
||||||
|
// Number of nodes that precede the current node and will have to run the maximum amount of tests per node
|
||||||
|
var numPrecedingMaxLoadNodes int
|
||||||
|
if parallelNode > numMaxLoadNodes {
|
||||||
|
numPrecedingMaxLoadNodes = numMaxLoadNodes
|
||||||
|
} else {
|
||||||
|
numPrecedingMaxLoadNodes = parallelNode - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Number of nodes that precede the current node and will have to run the minimum amount of tests per node
|
||||||
|
var numPrecedingMinLoadNodes int
|
||||||
|
if parallelNode <= numMaxLoadNodes {
|
||||||
|
numPrecedingMinLoadNodes = 0
|
||||||
|
} else {
|
||||||
|
numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Evaluate the test start index and number of tests to run
|
||||||
|
startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
|
||||||
|
if parallelNode > numMaxLoadNodes {
|
||||||
|
count = minTestsPerNode
|
||||||
|
} else {
|
||||||
|
count = maxTestsPerNode
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
|
@ -0,0 +1,197 @@
|
||||||
|
package spec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/containernode"
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Spec struct {
|
||||||
|
subject leafnodes.SubjectNode
|
||||||
|
focused bool
|
||||||
|
announceProgress bool
|
||||||
|
|
||||||
|
containers []*containernode.ContainerNode
|
||||||
|
|
||||||
|
state types.SpecState
|
||||||
|
runTime time.Duration
|
||||||
|
failure types.SpecFailure
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||||
|
spec := &Spec{
|
||||||
|
subject: subject,
|
||||||
|
containers: containers,
|
||||||
|
focused: subject.Flag() == types.FlagTypeFocused,
|
||||||
|
announceProgress: announceProgress,
|
||||||
|
}
|
||||||
|
|
||||||
|
spec.processFlag(subject.Flag())
|
||||||
|
for i := len(containers) - 1; i >= 0; i-- {
|
||||||
|
spec.processFlag(containers[i].Flag())
|
||||||
|
}
|
||||||
|
|
||||||
|
return spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) processFlag(flag types.FlagType) {
|
||||||
|
if flag == types.FlagTypeFocused {
|
||||||
|
spec.focused = true
|
||||||
|
} else if flag == types.FlagTypePending {
|
||||||
|
spec.state = types.SpecStatePending
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Skip() {
|
||||||
|
spec.state = types.SpecStateSkipped
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Failed() bool {
|
||||||
|
return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Passed() bool {
|
||||||
|
return spec.state == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Pending() bool {
|
||||||
|
return spec.state == types.SpecStatePending
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Skipped() bool {
|
||||||
|
return spec.state == types.SpecStateSkipped
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Focused() bool {
|
||||||
|
return spec.focused
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) IsMeasurement() bool {
|
||||||
|
return spec.subject.Type() == types.SpecComponentTypeMeasure
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
|
||||||
|
componentTexts := make([]string, len(spec.containers)+1)
|
||||||
|
componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
|
||||||
|
|
||||||
|
for i, container := range spec.containers {
|
||||||
|
componentTexts[i] = container.Text()
|
||||||
|
componentCodeLocations[i] = container.CodeLocation()
|
||||||
|
}
|
||||||
|
|
||||||
|
componentTexts[len(spec.containers)] = spec.subject.Text()
|
||||||
|
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
|
||||||
|
|
||||||
|
return &types.SpecSummary{
|
||||||
|
IsMeasurement: spec.IsMeasurement(),
|
||||||
|
NumberOfSamples: spec.subject.Samples(),
|
||||||
|
ComponentTexts: componentTexts,
|
||||||
|
ComponentCodeLocations: componentCodeLocations,
|
||||||
|
State: spec.state,
|
||||||
|
RunTime: spec.runTime,
|
||||||
|
Failure: spec.failure,
|
||||||
|
Measurements: spec.measurementsReport(),
|
||||||
|
SuiteID: suiteID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) ConcatenatedString() string {
|
||||||
|
s := ""
|
||||||
|
for _, container := range spec.containers {
|
||||||
|
s += container.Text() + " "
|
||||||
|
}
|
||||||
|
|
||||||
|
return s + spec.subject.Text()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) Run(writer io.Writer) {
|
||||||
|
startTime := time.Now()
|
||||||
|
defer func() {
|
||||||
|
spec.runTime = time.Since(startTime)
|
||||||
|
}()
|
||||||
|
|
||||||
|
for sample := 0; sample < spec.subject.Samples(); sample++ {
|
||||||
|
spec.runSample(sample, writer)
|
||||||
|
|
||||||
|
if spec.state != types.SpecStatePassed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||||
|
spec.state = types.SpecStatePassed
|
||||||
|
spec.failure = types.SpecFailure{}
|
||||||
|
innerMostContainerIndexToUnwind := -1
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||||
|
container := spec.containers[i]
|
||||||
|
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||||
|
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||||
|
afterEachState, afterEachFailure := afterEach.Run()
|
||||||
|
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||||
|
spec.state = afterEachState
|
||||||
|
spec.failure = afterEachFailure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for i, container := range spec.containers {
|
||||||
|
innerMostContainerIndexToUnwind = i
|
||||||
|
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||||
|
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||||
|
spec.state, spec.failure = beforeEach.Run()
|
||||||
|
if spec.state != types.SpecStatePassed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, container := range spec.containers {
|
||||||
|
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||||
|
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||||
|
spec.state, spec.failure = justBeforeEach.Run()
|
||||||
|
if spec.state != types.SpecStatePassed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
spec.announceSubject(writer, spec.subject)
|
||||||
|
spec.state, spec.failure = spec.subject.Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||||
|
if spec.announceProgress {
|
||||||
|
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
|
||||||
|
writer.Write([]byte(s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||||
|
if spec.announceProgress {
|
||||||
|
nodeType := ""
|
||||||
|
switch subject.Type() {
|
||||||
|
case types.SpecComponentTypeIt:
|
||||||
|
nodeType = "It"
|
||||||
|
case types.SpecComponentTypeMeasure:
|
||||||
|
nodeType = "Measure"
|
||||||
|
}
|
||||||
|
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||||
|
writer.Write([]byte(s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
|
||||||
|
if !spec.IsMeasurement() || spec.Failed() {
|
||||||
|
return map[string]*types.SpecMeasurement{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
|
||||||
|
}
|
|
@ -0,0 +1,122 @@
|
||||||
|
package spec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Specs struct {
|
||||||
|
specs []*Spec
|
||||||
|
numberOfOriginalSpecs int
|
||||||
|
hasProgrammaticFocus bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSpecs(specs []*Spec) *Specs {
|
||||||
|
return &Specs{
|
||||||
|
specs: specs,
|
||||||
|
numberOfOriginalSpecs: len(specs),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) Specs() []*Spec {
|
||||||
|
return e.specs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) NumberOfOriginalSpecs() int {
|
||||||
|
return e.numberOfOriginalSpecs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) HasProgrammaticFocus() bool {
|
||||||
|
return e.hasProgrammaticFocus
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) Shuffle(r *rand.Rand) {
|
||||||
|
sort.Sort(e)
|
||||||
|
permutation := r.Perm(len(e.specs))
|
||||||
|
shuffledSpecs := make([]*Spec, len(e.specs))
|
||||||
|
for i, j := range permutation {
|
||||||
|
shuffledSpecs[i] = e.specs[j]
|
||||||
|
}
|
||||||
|
e.specs = shuffledSpecs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
|
||||||
|
if focusString == "" && skipString == "" {
|
||||||
|
e.applyProgrammaticFocus()
|
||||||
|
} else {
|
||||||
|
e.applyRegExpFocus(description, focusString, skipString)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) applyProgrammaticFocus() {
|
||||||
|
e.hasProgrammaticFocus = false
|
||||||
|
for _, spec := range e.specs {
|
||||||
|
if spec.Focused() && !spec.Pending() {
|
||||||
|
e.hasProgrammaticFocus = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.hasProgrammaticFocus {
|
||||||
|
for _, spec := range e.specs {
|
||||||
|
if !spec.Focused() {
|
||||||
|
spec.Skip()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
|
||||||
|
for _, spec := range e.specs {
|
||||||
|
matchesFocus := true
|
||||||
|
matchesSkip := false
|
||||||
|
|
||||||
|
toMatch := []byte(description + " " + spec.ConcatenatedString())
|
||||||
|
|
||||||
|
if focusString != "" {
|
||||||
|
focusFilter := regexp.MustCompile(focusString)
|
||||||
|
matchesFocus = focusFilter.Match([]byte(toMatch))
|
||||||
|
}
|
||||||
|
|
||||||
|
if skipString != "" {
|
||||||
|
skipFilter := regexp.MustCompile(skipString)
|
||||||
|
matchesSkip = skipFilter.Match([]byte(toMatch))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !matchesFocus || matchesSkip {
|
||||||
|
spec.Skip()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) SkipMeasurements() {
|
||||||
|
for _, spec := range e.specs {
|
||||||
|
if spec.IsMeasurement() {
|
||||||
|
spec.Skip()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) TrimForParallelization(total int, node int) {
|
||||||
|
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
|
||||||
|
if count == 0 {
|
||||||
|
e.specs = make([]*Spec, 0)
|
||||||
|
} else {
|
||||||
|
e.specs = e.specs[startIndex : startIndex+count]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//sort.Interface
|
||||||
|
|
||||||
|
func (e *Specs) Len() int {
|
||||||
|
return len(e.specs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) Less(i, j int) bool {
|
||||||
|
return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) Swap(i, j int) {
|
||||||
|
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
|
||||||
|
}
|
|
@ -0,0 +1,15 @@
|
||||||
|
package specrunner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
func randomID() string {
|
||||||
|
b := make([]byte, 8)
|
||||||
|
_, err := rand.Read(b)
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
|
||||||
|
}
|
|
@ -0,0 +1,324 @@
|
||||||
|
package specrunner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
Writer "github.com/onsi/ginkgo/internal/writer"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SpecRunner struct {
|
||||||
|
description string
|
||||||
|
beforeSuiteNode leafnodes.SuiteNode
|
||||||
|
specs *spec.Specs
|
||||||
|
afterSuiteNode leafnodes.SuiteNode
|
||||||
|
reporters []reporters.Reporter
|
||||||
|
startTime time.Time
|
||||||
|
suiteID string
|
||||||
|
runningSpec *spec.Spec
|
||||||
|
writer Writer.WriterInterface
|
||||||
|
config config.GinkgoConfigType
|
||||||
|
interrupted bool
|
||||||
|
lock *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
|
||||||
|
return &SpecRunner{
|
||||||
|
description: description,
|
||||||
|
beforeSuiteNode: beforeSuiteNode,
|
||||||
|
specs: specs,
|
||||||
|
afterSuiteNode: afterSuiteNode,
|
||||||
|
reporters: reporters,
|
||||||
|
writer: writer,
|
||||||
|
config: config,
|
||||||
|
suiteID: randomID(),
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) Run() bool {
|
||||||
|
if runner.config.DryRun {
|
||||||
|
runner.performDryRun()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.reportSuiteWillBegin()
|
||||||
|
go runner.registerForInterrupts()
|
||||||
|
|
||||||
|
suitePassed := runner.runBeforeSuite()
|
||||||
|
|
||||||
|
if suitePassed {
|
||||||
|
suitePassed = runner.runSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.blockForeverIfInterrupted()
|
||||||
|
|
||||||
|
suitePassed = runner.runAfterSuite() && suitePassed
|
||||||
|
|
||||||
|
runner.reportSuiteDidEnd(suitePassed)
|
||||||
|
|
||||||
|
return suitePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) performDryRun() {
|
||||||
|
runner.reportSuiteWillBegin()
|
||||||
|
|
||||||
|
if runner.beforeSuiteNode != nil {
|
||||||
|
summary := runner.beforeSuiteNode.Summary()
|
||||||
|
summary.State = types.SpecStatePassed
|
||||||
|
runner.reportBeforeSuite(summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, spec := range runner.specs.Specs() {
|
||||||
|
summary := spec.Summary(runner.suiteID)
|
||||||
|
runner.reportSpecWillRun(summary)
|
||||||
|
if summary.State == types.SpecStateInvalid {
|
||||||
|
summary.State = types.SpecStatePassed
|
||||||
|
}
|
||||||
|
runner.reportSpecDidComplete(summary, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
if runner.afterSuiteNode != nil {
|
||||||
|
summary := runner.afterSuiteNode.Summary()
|
||||||
|
summary.State = types.SpecStatePassed
|
||||||
|
runner.reportAfterSuite(summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.reportSuiteDidEnd(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) runBeforeSuite() bool {
|
||||||
|
if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.writer.Truncate()
|
||||||
|
conf := runner.config
|
||||||
|
passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||||
|
if !passed {
|
||||||
|
runner.writer.DumpOut()
|
||||||
|
}
|
||||||
|
runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
|
||||||
|
return passed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) runAfterSuite() bool {
|
||||||
|
if runner.afterSuiteNode == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.writer.Truncate()
|
||||||
|
conf := runner.config
|
||||||
|
passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||||
|
if !passed {
|
||||||
|
runner.writer.DumpOut()
|
||||||
|
}
|
||||||
|
runner.reportAfterSuite(runner.afterSuiteNode.Summary())
|
||||||
|
return passed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) runSpecs() bool {
|
||||||
|
suiteFailed := false
|
||||||
|
skipRemainingSpecs := false
|
||||||
|
for _, spec := range runner.specs.Specs() {
|
||||||
|
if runner.wasInterrupted() {
|
||||||
|
return suiteFailed
|
||||||
|
}
|
||||||
|
if skipRemainingSpecs {
|
||||||
|
spec.Skip()
|
||||||
|
}
|
||||||
|
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||||
|
|
||||||
|
if !spec.Skipped() && !spec.Pending() {
|
||||||
|
runner.runningSpec = spec
|
||||||
|
spec.Run(runner.writer)
|
||||||
|
runner.runningSpec = nil
|
||||||
|
if spec.Failed() {
|
||||||
|
suiteFailed = true
|
||||||
|
}
|
||||||
|
} else if spec.Pending() && runner.config.FailOnPending {
|
||||||
|
suiteFailed = true
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||||
|
|
||||||
|
if spec.Failed() && runner.config.FailFast {
|
||||||
|
skipRemainingSpecs = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return !suiteFailed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
|
||||||
|
if runner.runningSpec == nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return runner.runningSpec.Summary(runner.suiteID), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) registerForInterrupts() {
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
|
<-c
|
||||||
|
signal.Stop(c)
|
||||||
|
runner.markInterrupted()
|
||||||
|
go runner.registerForHardInterrupts()
|
||||||
|
runner.writer.DumpOutWithHeader(`
|
||||||
|
Received interrupt. Emitting contents of GinkgoWriter...
|
||||||
|
---------------------------------------------------------
|
||||||
|
`)
|
||||||
|
if runner.afterSuiteNode != nil {
|
||||||
|
fmt.Fprint(os.Stderr, `
|
||||||
|
---------------------------------------------------------
|
||||||
|
Received interrupt. Running AfterSuite...
|
||||||
|
^C again to terminate immediately
|
||||||
|
`)
|
||||||
|
runner.runAfterSuite()
|
||||||
|
}
|
||||||
|
runner.reportSuiteDidEnd(false)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) registerForHardInterrupts() {
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
|
<-c
|
||||||
|
fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) blockForeverIfInterrupted() {
|
||||||
|
runner.lock.Lock()
|
||||||
|
interrupted := runner.interrupted
|
||||||
|
runner.lock.Unlock()
|
||||||
|
|
||||||
|
if interrupted {
|
||||||
|
select {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) markInterrupted() {
|
||||||
|
runner.lock.Lock()
|
||||||
|
defer runner.lock.Unlock()
|
||||||
|
runner.interrupted = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) wasInterrupted() bool {
|
||||||
|
runner.lock.Lock()
|
||||||
|
defer runner.lock.Unlock()
|
||||||
|
return runner.interrupted
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) reportSuiteWillBegin() {
|
||||||
|
runner.startTime = time.Now()
|
||||||
|
summary := runner.summary(true)
|
||||||
|
for _, reporter := range runner.reporters {
|
||||||
|
reporter.SpecSuiteWillBegin(runner.config, summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
|
||||||
|
for _, reporter := range runner.reporters {
|
||||||
|
reporter.BeforeSuiteDidRun(summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
|
||||||
|
for _, reporter := range runner.reporters {
|
||||||
|
reporter.AfterSuiteDidRun(summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
|
||||||
|
runner.writer.Truncate()
|
||||||
|
|
||||||
|
for _, reporter := range runner.reporters {
|
||||||
|
reporter.SpecWillRun(summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
|
||||||
|
for i := len(runner.reporters) - 1; i >= 1; i-- {
|
||||||
|
runner.reporters[i].SpecDidComplete(summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
if failed {
|
||||||
|
runner.writer.DumpOut()
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.reporters[0].SpecDidComplete(summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
|
||||||
|
summary := runner.summary(success)
|
||||||
|
summary.RunTime = time.Since(runner.startTime)
|
||||||
|
for _, reporter := range runner.reporters {
|
||||||
|
reporter.SpecSuiteDidEnd(summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
|
||||||
|
count = 0
|
||||||
|
|
||||||
|
for _, spec := range runner.specs.Specs() {
|
||||||
|
if filter(spec) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
|
||||||
|
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||||
|
return !ex.Skipped() && !ex.Pending()
|
||||||
|
})
|
||||||
|
|
||||||
|
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||||
|
return ex.Pending()
|
||||||
|
})
|
||||||
|
|
||||||
|
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||||
|
return ex.Skipped()
|
||||||
|
})
|
||||||
|
|
||||||
|
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||||
|
return ex.Passed()
|
||||||
|
})
|
||||||
|
|
||||||
|
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||||
|
return ex.Failed()
|
||||||
|
})
|
||||||
|
|
||||||
|
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
|
||||||
|
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
|
||||||
|
}
|
||||||
|
|
||||||
|
return &types.SuiteSummary{
|
||||||
|
SuiteDescription: runner.description,
|
||||||
|
SuiteSucceeded: success,
|
||||||
|
SuiteID: runner.suiteID,
|
||||||
|
|
||||||
|
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
|
||||||
|
NumberOfTotalSpecs: len(runner.specs.Specs()),
|
||||||
|
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
|
||||||
|
NumberOfPendingSpecs: numberOfPendingSpecs,
|
||||||
|
NumberOfSkippedSpecs: numberOfSkippedSpecs,
|
||||||
|
NumberOfPassedSpecs: numberOfPassedSpecs,
|
||||||
|
NumberOfFailedSpecs: numberOfFailedSpecs,
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,171 @@
|
||||||
|
package suite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/internal/containernode"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
"github.com/onsi/ginkgo/internal/specrunner"
|
||||||
|
"github.com/onsi/ginkgo/internal/writer"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ginkgoTestingT interface {
|
||||||
|
Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
type Suite struct {
|
||||||
|
topLevelContainer *containernode.ContainerNode
|
||||||
|
currentContainer *containernode.ContainerNode
|
||||||
|
containerIndex int
|
||||||
|
beforeSuiteNode leafnodes.SuiteNode
|
||||||
|
afterSuiteNode leafnodes.SuiteNode
|
||||||
|
runner *specrunner.SpecRunner
|
||||||
|
failer *failer.Failer
|
||||||
|
running bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(failer *failer.Failer) *Suite {
|
||||||
|
topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
|
||||||
|
|
||||||
|
return &Suite{
|
||||||
|
topLevelContainer: topLevelContainer,
|
||||||
|
currentContainer: topLevelContainer,
|
||||||
|
failer: failer,
|
||||||
|
containerIndex: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
|
||||||
|
if config.ParallelTotal < 1 {
|
||||||
|
panic("ginkgo.parallel.total must be >= 1")
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
|
||||||
|
panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
|
||||||
|
}
|
||||||
|
|
||||||
|
r := rand.New(rand.NewSource(config.RandomSeed))
|
||||||
|
suite.topLevelContainer.Shuffle(r)
|
||||||
|
specs := suite.generateSpecs(description, config)
|
||||||
|
suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
|
||||||
|
|
||||||
|
suite.running = true
|
||||||
|
success := suite.runner.Run()
|
||||||
|
if !success {
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
return success, specs.HasProgrammaticFocus()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
|
||||||
|
specsSlice := []*spec.Spec{}
|
||||||
|
suite.topLevelContainer.BackPropagateProgrammaticFocus()
|
||||||
|
for _, collatedNodes := range suite.topLevelContainer.Collate() {
|
||||||
|
specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
|
||||||
|
}
|
||||||
|
|
||||||
|
specs := spec.NewSpecs(specsSlice)
|
||||||
|
|
||||||
|
if config.RandomizeAllSpecs {
|
||||||
|
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
|
||||||
|
}
|
||||||
|
|
||||||
|
specs.ApplyFocus(description, config.FocusString, config.SkipString)
|
||||||
|
|
||||||
|
if config.SkipMeasurements {
|
||||||
|
specs.SkipMeasurements()
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.ParallelTotal > 1 {
|
||||||
|
specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return specs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
|
||||||
|
return suite.runner.CurrentSpecSummary()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.beforeSuiteNode != nil {
|
||||||
|
panic("You may only call BeforeSuite once!")
|
||||||
|
}
|
||||||
|
suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.afterSuiteNode != nil {
|
||||||
|
panic("You may only call AfterSuite once!")
|
||||||
|
}
|
||||||
|
suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.beforeSuiteNode != nil {
|
||||||
|
panic("You may only call BeforeSuite once!")
|
||||||
|
}
|
||||||
|
suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.afterSuiteNode != nil {
|
||||||
|
panic("You may only call AfterSuite once!")
|
||||||
|
}
|
||||||
|
suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
|
||||||
|
container := containernode.New(text, flag, codeLocation)
|
||||||
|
suite.currentContainer.PushContainerNode(container)
|
||||||
|
|
||||||
|
previousContainer := suite.currentContainer
|
||||||
|
suite.currentContainer = container
|
||||||
|
suite.containerIndex++
|
||||||
|
|
||||||
|
body()
|
||||||
|
|
||||||
|
suite.containerIndex--
|
||||||
|
suite.currentContainer = previousContainer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.running {
|
||||||
|
suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
|
||||||
|
}
|
||||||
|
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
|
||||||
|
if suite.running {
|
||||||
|
suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
|
||||||
|
}
|
||||||
|
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.running {
|
||||||
|
suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
|
||||||
|
}
|
||||||
|
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.running {
|
||||||
|
suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
|
||||||
|
}
|
||||||
|
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||||
|
if suite.running {
|
||||||
|
suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
|
||||||
|
}
|
||||||
|
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||||
|
}
|
76
vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
76
vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
package testingtproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
type failFunc func(message string, callerSkip ...int)
|
||||||
|
|
||||||
|
func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
|
||||||
|
return &ginkgoTestingTProxy{
|
||||||
|
fail: fail,
|
||||||
|
offset: offset,
|
||||||
|
writer: writer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type ginkgoTestingTProxy struct {
|
||||||
|
fail failFunc
|
||||||
|
offset int
|
||||||
|
writer io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
|
||||||
|
t.fail(fmt.Sprintln(args...), t.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
|
||||||
|
t.fail(fmt.Sprintf(format, args...), t.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Fail() {
|
||||||
|
t.fail("failed", t.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) FailNow() {
|
||||||
|
t.fail("failed", t.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
|
||||||
|
t.fail(fmt.Sprintln(args...), t.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
|
||||||
|
t.fail(fmt.Sprintf(format, args...), t.offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
|
||||||
|
fmt.Fprintln(t.writer, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
|
||||||
|
fmt.Fprintf(t.writer, format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Failed() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Parallel() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
|
||||||
|
fmt.Println(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
|
||||||
|
fmt.Printf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) SkipNow() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ginkgoTestingTProxy) Skipped() bool {
|
||||||
|
return false
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
package writer
|
||||||
|
|
||||||
|
type FakeGinkgoWriter struct {
|
||||||
|
EventStream []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFake() *FakeGinkgoWriter {
|
||||||
|
return &FakeGinkgoWriter{
|
||||||
|
EventStream: []string{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (writer *FakeGinkgoWriter) AddEvent(event string) {
|
||||||
|
writer.EventStream = append(writer.EventStream, event)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (writer *FakeGinkgoWriter) Truncate() {
|
||||||
|
writer.EventStream = append(writer.EventStream, "TRUNCATE")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (writer *FakeGinkgoWriter) DumpOut() {
|
||||||
|
writer.EventStream = append(writer.EventStream, "DUMP")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
|
||||||
|
writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
|
||||||
|
return 0, nil
|
||||||
|
}
|
|
@ -0,0 +1,71 @@
|
||||||
|
package writer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type WriterInterface interface {
|
||||||
|
io.Writer
|
||||||
|
|
||||||
|
Truncate()
|
||||||
|
DumpOut()
|
||||||
|
DumpOutWithHeader(header string)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Writer struct {
|
||||||
|
buffer *bytes.Buffer
|
||||||
|
outWriter io.Writer
|
||||||
|
lock *sync.Mutex
|
||||||
|
stream bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(outWriter io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
buffer: &bytes.Buffer{},
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
outWriter: outWriter,
|
||||||
|
stream: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) SetStream(stream bool) {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
w.stream = stream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) Write(b []byte) (n int, err error) {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
|
||||||
|
if w.stream {
|
||||||
|
return w.outWriter.Write(b)
|
||||||
|
} else {
|
||||||
|
return w.buffer.Write(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) Truncate() {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
w.buffer.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) DumpOut() {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
if !w.stream {
|
||||||
|
w.buffer.WriteTo(w.outWriter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) DumpOutWithHeader(header string) {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
if !w.stream && w.buffer.Len() > 0 {
|
||||||
|
w.outWriter.Write([]byte(header))
|
||||||
|
w.buffer.WriteTo(w.outWriter)
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,83 @@
|
||||||
|
/*
|
||||||
|
Ginkgo's Default Reporter
|
||||||
|
|
||||||
|
A number of command line flags are available to tweak Ginkgo's default output.
|
||||||
|
|
||||||
|
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||||
|
*/
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type DefaultReporter struct {
|
||||||
|
config config.DefaultReporterConfigType
|
||||||
|
stenographer stenographer.Stenographer
|
||||||
|
specSummaries []*types.SpecSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
|
||||||
|
return &DefaultReporter{
|
||||||
|
config: config,
|
||||||
|
stenographer: stenographer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
|
||||||
|
if config.ParallelTotal > 1 {
|
||||||
|
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
|
||||||
|
}
|
||||||
|
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||||
|
reporter.stenographer.AnnounceSpecWillRun(specSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
switch specSummary.State {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
if specSummary.IsMeasurement {
|
||||||
|
reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
|
||||||
|
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
|
||||||
|
reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
|
||||||
|
} else {
|
||||||
|
reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||||
|
}
|
||||||
|
case types.SpecStatePending:
|
||||||
|
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||||
|
case types.SpecStateTimedOut:
|
||||||
|
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
reporter.specSummaries = append(reporter.specSummaries, specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
reporter.stenographer.SummarizeFailures(reporter.specSummaries)
|
||||||
|
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
|
||||||
|
}
|
|
@ -0,0 +1,59 @@
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//FakeReporter is useful for testing purposes
|
||||||
|
type FakeReporter struct {
|
||||||
|
Config config.GinkgoConfigType
|
||||||
|
|
||||||
|
BeginSummary *types.SuiteSummary
|
||||||
|
BeforeSuiteSummary *types.SetupSummary
|
||||||
|
SpecWillRunSummaries []*types.SpecSummary
|
||||||
|
SpecSummaries []*types.SpecSummary
|
||||||
|
AfterSuiteSummary *types.SetupSummary
|
||||||
|
EndSummary *types.SuiteSummary
|
||||||
|
|
||||||
|
SpecWillRunStub func(specSummary *types.SpecSummary)
|
||||||
|
SpecDidCompleteStub func(specSummary *types.SpecSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFakeReporter() *FakeReporter {
|
||||||
|
return &FakeReporter{
|
||||||
|
SpecWillRunSummaries: make([]*types.SpecSummary, 0),
|
||||||
|
SpecSummaries: make([]*types.SpecSummary, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
fakeR.Config = config
|
||||||
|
fakeR.BeginSummary = summary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
fakeR.BeforeSuiteSummary = setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
if fakeR.SpecWillRunStub != nil {
|
||||||
|
fakeR.SpecWillRunStub(specSummary)
|
||||||
|
}
|
||||||
|
fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
if fakeR.SpecDidCompleteStub != nil {
|
||||||
|
fakeR.SpecDidCompleteStub(specSummary)
|
||||||
|
}
|
||||||
|
fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
fakeR.AfterSuiteSummary = setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
fakeR.EndSummary = summary
|
||||||
|
}
|
|
@ -0,0 +1,139 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
JUnit XML Reporter for Ginkgo
|
||||||
|
|
||||||
|
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type JUnitTestSuite struct {
|
||||||
|
XMLName xml.Name `xml:"testsuite"`
|
||||||
|
TestCases []JUnitTestCase `xml:"testcase"`
|
||||||
|
Tests int `xml:"tests,attr"`
|
||||||
|
Failures int `xml:"failures,attr"`
|
||||||
|
Time float64 `xml:"time,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type JUnitTestCase struct {
|
||||||
|
Name string `xml:"name,attr"`
|
||||||
|
ClassName string `xml:"classname,attr"`
|
||||||
|
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
|
||||||
|
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||||
|
Time float64 `xml:"time,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type JUnitFailureMessage struct {
|
||||||
|
Type string `xml:"type,attr"`
|
||||||
|
Message string `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type JUnitSkipped struct {
|
||||||
|
XMLName xml.Name `xml:"skipped"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type JUnitReporter struct {
|
||||||
|
suite JUnitTestSuite
|
||||||
|
filename string
|
||||||
|
testSuiteName string
|
||||||
|
}
|
||||||
|
|
||||||
|
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
|
||||||
|
func NewJUnitReporter(filename string) *JUnitReporter {
|
||||||
|
return &JUnitReporter{
|
||||||
|
filename: filename,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
reporter.suite = JUnitTestSuite{
|
||||||
|
Tests: summary.NumberOfSpecsThatWillBeRun,
|
||||||
|
TestCases: []JUnitTestCase{},
|
||||||
|
}
|
||||||
|
reporter.testSuiteName = summary.SuiteDescription
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
reporter.handleSetupSummary("BeforeSuite", setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
reporter.handleSetupSummary("AfterSuite", setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
testCase := JUnitTestCase{
|
||||||
|
Name: name,
|
||||||
|
ClassName: reporter.testSuiteName,
|
||||||
|
}
|
||||||
|
|
||||||
|
testCase.FailureMessage = &JUnitFailureMessage{
|
||||||
|
Type: reporter.failureTypeForState(setupSummary.State),
|
||||||
|
Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
|
||||||
|
}
|
||||||
|
testCase.Time = setupSummary.RunTime.Seconds()
|
||||||
|
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
testCase := JUnitTestCase{
|
||||||
|
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
|
||||||
|
ClassName: reporter.testSuiteName,
|
||||||
|
}
|
||||||
|
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
|
||||||
|
testCase.FailureMessage = &JUnitFailureMessage{
|
||||||
|
Type: reporter.failureTypeForState(specSummary.State),
|
||||||
|
Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
|
||||||
|
testCase.Skipped = &JUnitSkipped{}
|
||||||
|
}
|
||||||
|
testCase.Time = specSummary.RunTime.Seconds()
|
||||||
|
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
reporter.suite.Time = summary.RunTime.Seconds()
|
||||||
|
reporter.suite.Failures = summary.NumberOfFailedSpecs
|
||||||
|
file, err := os.Create(reporter.filename)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
file.WriteString(xml.Header)
|
||||||
|
encoder := xml.NewEncoder(file)
|
||||||
|
encoder.Indent(" ", " ")
|
||||||
|
err = encoder.Encode(reporter.suite)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
|
||||||
|
switch state {
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
return "Failure"
|
||||||
|
case types.SpecStateTimedOut:
|
||||||
|
return "Timeout"
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
return "Panic"
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,15 @@
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Reporter interface {
|
||||||
|
SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||||
|
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||||
|
SpecWillRun(specSummary *types.SpecSummary)
|
||||||
|
SpecDidComplete(specSummary *types.SpecSummary)
|
||||||
|
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||||
|
SpecSuiteDidEnd(summary *types.SuiteSummary)
|
||||||
|
}
|
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
package stenographer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
|
||||||
|
var out string
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
out = fmt.Sprintf(format, args...)
|
||||||
|
} else {
|
||||||
|
out = format
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.color {
|
||||||
|
return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
|
||||||
|
} else {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
|
||||||
|
fmt.Println(text)
|
||||||
|
fmt.Println(strings.Repeat(bannerCharacter, len(text)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printNewLine() {
|
||||||
|
fmt.Println("")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printDelimiter() {
|
||||||
|
fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
|
||||||
|
fmt.Print(s.indent(indentation, format, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
|
||||||
|
fmt.Println(s.indent(indentation, format, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
|
||||||
|
var text string
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
text = fmt.Sprintf(format, args...)
|
||||||
|
} else {
|
||||||
|
text = format
|
||||||
|
}
|
||||||
|
|
||||||
|
stringArray := strings.Split(text, "\n")
|
||||||
|
padding := ""
|
||||||
|
if indentation >= 0 {
|
||||||
|
padding = strings.Repeat(" ", indentation)
|
||||||
|
}
|
||||||
|
for i, s := range stringArray {
|
||||||
|
stringArray[i] = fmt.Sprintf("%s%s", padding, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(stringArray, "\n")
|
||||||
|
}
|
138
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
138
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
|
@ -0,0 +1,138 @@
|
||||||
|
package stenographer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewFakeStenographerCall packages a single stenographer invocation as a
// method name plus the arguments it received.
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
	return FakeStenographerCall{Method: method, Args: args}
}

// FakeStenographer is a test double that records every call made to it so
// reporter tests can assert on the sequence of announcements.
type FakeStenographer struct {
	calls []FakeStenographerCall // recorded invocations, in call order
	lock  *sync.Mutex            // guards calls
}

// FakeStenographerCall captures one recorded invocation.
type FakeStenographerCall struct {
	Method string
	Args   []interface{}
}

// NewFakeStenographer returns a FakeStenographer with an empty call log.
func NewFakeStenographer() *FakeStenographer {
	fake := &FakeStenographer{
		lock: &sync.Mutex{},
	}
	fake.Reset()
	return fake
}

// Calls returns every recorded invocation, in order.
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()
	return stenographer.calls
}

// Reset discards the recorded call log.
func (stenographer *FakeStenographer) Reset() {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()
	stenographer.calls = []FakeStenographerCall{}
}

// CallsTo returns only the recorded invocations of the named method.
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()

	matching := []FakeStenographerCall{}
	for _, call := range stenographer.calls {
		if call.Method == method {
			matching = append(matching, call)
		}
	}
	return matching
}

// registerCall appends one invocation record; every Announce method below
// funnels through here.
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
	stenographer.lock.Lock()
	defer stenographer.lock.Unlock()
	stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
}
|
||||||
|
|
||||||
|
// The methods below implement the Stenographer interface by recording the
// call (method name + arguments) instead of producing output.

// AnnounceSuite records a suite announcement.
func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
	stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
}

// AnnounceAggregatedParallelRun records an aggregated parallel-run announcement.
func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
	stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
}

// AnnounceParallelRun records a per-node parallel-run announcement.
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
	stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
}

// AnnounceNumberOfSpecs records the specs-to-run announcement.
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
	stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
}

// AnnounceSpecRunCompletion records the end-of-run summary announcement.
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
	stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
}

// AnnounceSpecWillRun records that a spec is about to run.
func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
	stenographer.registerCall("AnnounceSpecWillRun", spec)
}

// AnnounceBeforeSuiteFailure records a BeforeSuite failure announcement.
func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
}

// AnnounceAfterSuiteFailure records an AfterSuite failure announcement.
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
}

// AnnounceCapturedOutput records captured spec output.
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
	stenographer.registerCall("AnnounceCapturedOutput", output)
}

// AnnounceSuccesfulSpec records a passing spec. (Succesful [sic] — the
// misspelling is part of the exported API.)
func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
	stenographer.registerCall("AnnounceSuccesfulSpec", spec)
}

// AnnounceSuccesfulSlowSpec records a passing-but-slow spec.
func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
	stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
}

// AnnounceSuccesfulMeasurement records a passing measurement spec.
func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
	stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
}

// AnnouncePendingSpec records a pending spec.
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
	stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
}

// AnnounceSkippedSpec records a skipped spec.
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
}

// AnnounceSpecTimedOut records a timed-out spec.
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
}

// AnnounceSpecPanicked records a panicked spec.
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
}

// AnnounceSpecFailed records a failed spec.
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
}

// SummarizeFailures records the end-of-run failure recap.
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
	stenographer.registerCall("SummarizeFailures", summaries)
}
|
549
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
549
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
|
@ -0,0 +1,549 @@
|
||||||
|
/*
|
||||||
|
The stenographer is used by Ginkgo's reporters to generate output.
|
||||||
|
|
||||||
|
Move along, nothing to see here.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package stenographer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ANSI escape sequences used to style terminal output.
const defaultStyle = "\x1b[0m"
const boldStyle = "\x1b[1m"
const redColor = "\x1b[91m"
const greenColor = "\x1b[32m"
const yellowColor = "\x1b[33m"
const cyanColor = "\x1b[36m"
const grayColor = "\x1b[90m"
const lightGrayColor = "\x1b[37m"

// cursorStateType tracks what kind of output was last emitted so the
// stenographer can decide when to insert newlines and delimiters.
type cursorStateType int

const (
	cursorStateTop       cursorStateType = iota // initial state (set by New)
	cursorStateStreaming                        // mid-line, streaming single-character progress markers (set by stream)
	cursorStateMidBlock                         // inside an open multi-line block (set by midBlock)
	cursorStateEndBlock                         // just closed a block with a delimiter (set by endBlock)
)
|
||||||
|
|
||||||
|
// Stenographer is the interface Ginkgo's reporters use to generate console
// output. consoleStenographer is the terminal implementation;
// FakeStenographer records calls for use in reporter tests.
type Stenographer interface {
	// Suite/run-level announcements.
	AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
	AnnounceAggregatedParallelRun(nodes int, succinct bool)
	AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
	AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
	AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)

	// Per-spec lifecycle and suite setup/teardown failures.
	AnnounceSpecWillRun(spec *types.SpecSummary)
	AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
	AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)

	AnnounceCapturedOutput(output string)

	// Per-spec outcomes.
	AnnounceSuccesfulSpec(spec *types.SpecSummary)
	AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
	AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)

	AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
	AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)

	AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
	AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
	AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)

	// End-of-run recap of every failing spec.
	SummarizeFailures(summaries []*types.SpecSummary)
}
|
||||||
|
|
||||||
|
// New returns the console Stenographer. When color is true, output is
// decorated with ANSI escape codes. On Windows the spec denoter falls back
// from "•" to "+" — presumably a console-font limitation; confirm upstream.
func New(color bool) Stenographer {
	denoter := "•"
	if runtime.GOOS == "windows" {
		denoter = "+"
	}
	return &consoleStenographer{
		color:       color,
		denoter:     denoter,
		cursorState: cursorStateTop,
	}
}

// consoleStenographer is the terminal implementation of Stenographer.
type consoleStenographer struct {
	color       bool            // whether to emit ANSI color codes
	denoter     string          // single-character marker for a passing spec
	cursorState cursorStateType // see cursorStateType for the state machine
}

// alternatingColors is cycled per nesting level when printing container
// descriptions so adjacent levels are visually distinguishable.
var alternatingColors = []string{defaultStyle, grayColor}
|
||||||
|
|
||||||
|
// AnnounceSuite prints the suite banner, the random seed, and (when
// randomizingAll) a note that all specs will be shuffled. Succinct mode
// emits a single compact fragment instead.
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
	if succinct {
		s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
		return
	}
	s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
	s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
	if randomizingAll {
		s.print(0, " - Will randomize all specs")
	}
	s.printNewLine()
}
|
||||||
|
|
||||||
|
// AnnounceParallelRun reports, from a single parallel node's perspective,
// which node this is and how many of the total specs it was assigned.
// Succinct mode prints just the node number.
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
	if succinct {
		s.print(0, "- node #%d ", node)
		return
	}
	s.println(0,
		"Parallel test node %s/%s. Assigned %s of %s specs.",
		s.colorize(boldStyle, "%d", node),
		s.colorize(boldStyle, "%d", nodes),
		s.colorize(boldStyle, "%d", specsToRun),
		s.colorize(boldStyle, "%d", totalSpecs),
	)
	s.printNewLine()
}

// AnnounceAggregatedParallelRun reports the node count when output from all
// parallel nodes is being aggregated.
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
	if succinct {
		s.print(0, "- %d nodes ", nodes)
		return
	}
	s.println(0,
		"Running in parallel across %s nodes",
		s.colorize(boldStyle, "%d", nodes),
	)
	s.printNewLine()
}

// AnnounceNumberOfSpecs reports how many specs will run out of the total.
// The succinct branch also switches into streaming cursor state, since
// progress characters follow immediately.
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
	if succinct {
		s.print(0, "- %d/%d specs ", specsToRun, total)
		s.stream()
		return
	}
	s.println(0,
		"Will run %s of %s specs",
		s.colorize(boldStyle, "%d", specsToRun),
		s.colorize(boldStyle, "%d", total),
	)

	s.printNewLine()
}
|
||||||
|
|
||||||
|
// AnnounceSpecRunCompletion prints the end-of-run summary: spec counts and
// run time, overall SUCCESS!/FAIL! status, and pass/fail/pending/skip
// tallies. A succinct successful run collapses to one fragment.
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
	if succinct && summary.SuiteSucceeded {
		s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
		return
	}
	s.printNewLine()
	color := greenColor
	if !summary.SuiteSucceeded {
		color = redColor
	}
	// NOTE(review): "Ran %d" is fed NumberOfSpecsThatWillBeRun — confirm the
	// summary offers no after-the-fact "ran" count that should be used here.
	s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))

	status := ""
	if summary.SuiteSucceeded {
		status = s.colorize(boldStyle+greenColor, "SUCCESS!")
	} else {
		status = s.colorize(boldStyle+redColor, "FAIL!")
	}

	s.print(0,
		"%s -- %s | %s | %s | %s ",
		status,
		s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
		s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
		s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
		s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
	)
}
|
||||||
|
|
||||||
|
// AnnounceSpecWillRun prints the spec's container hierarchy on one line
// (alternating colors per level), then the leaf description in bold with
// its code location beneath it. ComponentTexts[0] is excluded from the
// hierarchy line — presumably the implicit root container; confirm against
// the types package.
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
	s.startBlock()
	for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
		s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
	}

	indentation := 0
	if len(spec.ComponentTexts) > 2 {
		indentation = 1
		s.printNewLine()
	}
	index := len(spec.ComponentTexts) - 1
	s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
	s.printNewLine()
	s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
	s.printNewLine()
	s.midBlock()
}
|
||||||
|
|
||||||
|
// AnnounceBeforeSuiteFailure reports a failure in the suite-level
// BeforeSuite setup block.
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
}

// AnnounceAfterSuiteFailure reports a failure in the suite-level AfterSuite
// teardown block.
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
}

// announceSetupFailure prints a Failure/Panic/Timeout banner for a suite
// setup/teardown block, its code location, and the failure details.
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
	s.startBlock()
	var message string
	switch summary.State {
	case types.SpecStateFailed:
		message = "Failure"
	case types.SpecStatePanicked:
		message = "Panic"
	case types.SpecStateTimedOut:
		message = "Timeout"
	}

	s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))

	indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)

	s.printNewLine()
	s.printFailure(indentation, summary.State, summary.Failure, fullTrace)

	s.endBlock()
}
|
||||||
|
|
||||||
|
// AnnounceCapturedOutput prints non-empty captured output as its own block,
// leaving the cursor mid-block so the spec result can follow it.
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
	if output == "" {
		return
	}

	s.startBlock()
	s.println(0, output)
	s.midBlock()
}
|
||||||
|
|
||||||
|
// AnnounceSuccesfulSpec streams a single green denoter for a passing spec.
// (Succesful [sic] — the misspelling is part of the exported API.)
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
	s.print(0, s.colorize(greenColor, s.denoter))
	s.stream()
}

// AnnounceSuccesfulSlowSpec prints a full block for a passing spec tagged
// [SLOW TEST] with its run time.
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
	s.printBlockWithMessage(
		s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
		"",
		spec,
		succinct,
	)
}

// AnnounceSuccesfulMeasurement prints a [MEASUREMENT] block containing the
// formatted measurement report for a passing Measure spec.
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
	s.printBlockWithMessage(
		s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
		s.measurementReport(spec, succinct),
		spec,
		succinct,
	)
}
|
||||||
|
|
||||||
|
// AnnouncePendingSpec streams a yellow "P" for a pending spec, or — when
// noisy — prints a full [PENDING] block with the spec's context.
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
	if noisy {
		s.printBlockWithMessage(
			s.colorize(yellowColor, "P [PENDING]"),
			"",
			spec,
			false,
		)
	} else {
		s.print(0, s.colorize(yellowColor, "P"))
		s.stream()
	}
}
|
||||||
|
|
||||||
|
// AnnounceSkippedSpec streams a cyan "S" for an ordinary skip; a runtime
// skip (indicated by a non-empty spec.Failure) gets a full [SKIPPING] block
// with the skip's location and message.
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
	if succinct || spec.Failure == (types.SpecFailure{}) {
		s.print(0, s.colorize(cyanColor, "S"))
		s.stream()
	} else {
		s.startBlock()
		s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))

		indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)

		s.printNewLine()
		s.printSkip(indentation, spec.Failure)
		s.endBlock()
	}
}
|
||||||
|
|
||||||
|
// AnnounceSpecTimedOut prints a failure block for a spec that timed out.
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
}

// AnnounceSpecPanicked prints a failure block for a spec that panicked.
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
}

// AnnounceSpecFailed prints a failure block for a failed spec.
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
}
|
||||||
|
|
||||||
|
// SummarizeFailures prints an end-of-run recap of every failing spec: a
// [Timeout...]/[Panic!]/[Fail] tag, the spec's context, and the failure's
// code location. Prints nothing when no spec failed.
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
	failingSpecs := []*types.SpecSummary{}

	for _, summary := range summaries {
		if summary.HasFailureState() {
			failingSpecs = append(failingSpecs, summary)
		}
	}

	if len(failingSpecs) == 0 {
		return
	}

	s.printNewLine()
	s.printNewLine()
	plural := "s"
	if len(failingSpecs) == 1 {
		plural = ""
	}
	s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
	for _, summary := range failingSpecs {
		s.printNewLine()
		// Always true here — failingSpecs was filtered above; retained as-is.
		if summary.HasFailureState() {
			if summary.TimedOut() {
				s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
			} else if summary.Panicked() {
				s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
			} else if summary.Failed() {
				s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
			}
			s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
			s.printNewLine()
			s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
		}
	}
}
|
||||||
|
|
||||||
|
// startBlock prepares the terminal for a multi-line block: it terminates an
// in-progress streaming line with a delimiter, or inserts a blank line when
// the cursor is already mid-block.
func (s *consoleStenographer) startBlock() {
	if s.cursorState == cursorStateStreaming {
		s.printNewLine()
		s.printDelimiter()
	} else if s.cursorState == cursorStateMidBlock {
		s.printNewLine()
	}
}

// midBlock records that output stopped inside an open block.
func (s *consoleStenographer) midBlock() {
	s.cursorState = cursorStateMidBlock
}

// endBlock closes the current block with a delimiter.
func (s *consoleStenographer) endBlock() {
	s.printDelimiter()
	s.cursorState = cursorStateEndBlock
}

// stream records that single-character progress output is being emitted.
func (s *consoleStenographer) stream() {
	s.cursorState = cursorStateStreaming
}
|
||||||
|
|
||||||
|
// printBlockWithMessage prints a delimited block: a header line, the spec's
// code-location context, and an optional indented message.
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
	s.startBlock()
	s.println(0, header)

	indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)

	if message != "" {
		s.printNewLine()
		s.println(indentation, message)
	}

	s.endBlock()
}

// printSpecFailure prints a delimited failure block: a red banner (message,
// the setup/teardown phase that failed, and the run time), the spec's
// context, and the failure details.
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
	s.startBlock()
	s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))

	indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)

	s.printNewLine()
	s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
	s.endBlock()
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
|
||||||
|
switch failedComponentType {
|
||||||
|
case types.SpecComponentTypeBeforeSuite:
|
||||||
|
return " in Suite Setup (BeforeSuite)"
|
||||||
|
case types.SpecComponentTypeAfterSuite:
|
||||||
|
return " in Suite Teardown (AfterSuite)"
|
||||||
|
case types.SpecComponentTypeBeforeEach:
|
||||||
|
return " in Spec Setup (BeforeEach)"
|
||||||
|
case types.SpecComponentTypeJustBeforeEach:
|
||||||
|
return " in Spec Setup (JustBeforeEach)"
|
||||||
|
case types.SpecComponentTypeAfterEach:
|
||||||
|
return " in Spec Teardown (AfterEach)"
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// printSkip prints a runtime skip's message (in cyan) and the location that
// triggered it.
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
	s.println(indentation, s.colorize(cyanColor, spec.Message))
	s.printNewLine()
	s.println(indentation, spec.Location.String())
}

// printFailure prints the failure message and location. Panics always
// include the forwarded panic value and the full stack trace; other
// failures include the trace only when fullTrace is set.
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
	if state == types.SpecStatePanicked {
		s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
		s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
		s.println(indentation, failure.Location.String())
		s.printNewLine()
		s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
		s.println(indentation, failure.Location.FullStackTrace)
	} else {
		s.println(indentation, s.colorize(redColor, failure.Message))
		s.printNewLine()
		s.println(indentation, failure.Location.String())
		if fullTrace {
			s.printNewLine()
			s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
			s.println(indentation, failure.Location.FullStackTrace)
		}
	}
}
|
||||||
|
|
||||||
|
// printSpecContext prints each level of the spec's container hierarchy and
// returns the resulting indentation depth. The component that failed (or
// was skipped) is highlighted — red for failures, cyan for skips — and
// tagged with its block type ([BeforeEach], [It], ...). Index 0 is skipped
// unless it is the only entry — presumably the implicit root container;
// confirm against the types package.
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
	startIndex := 1
	indentation := 0

	if len(componentTexts) == 1 {
		startIndex = 0
	}

	for i := startIndex; i < len(componentTexts); i++ {
		if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
			color := redColor
			if state == types.SpecStateSkipped {
				color = cyanColor
			}
			// Map the failed component type to a human-readable block name.
			blockType := ""
			switch failedComponentType {
			case types.SpecComponentTypeBeforeSuite:
				blockType = "BeforeSuite"
			case types.SpecComponentTypeAfterSuite:
				blockType = "AfterSuite"
			case types.SpecComponentTypeBeforeEach:
				blockType = "BeforeEach"
			case types.SpecComponentTypeJustBeforeEach:
				blockType = "JustBeforeEach"
			case types.SpecComponentTypeAfterEach:
				blockType = "AfterEach"
			case types.SpecComponentTypeIt:
				blockType = "It"
			case types.SpecComponentTypeMeasure:
				blockType = "Measurement"
			}
			if succinct {
				s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
			} else {
				s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
				s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
			}
		} else {
			if succinct {
				s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
			} else {
				s.println(indentation, componentTexts[i])
				s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
			}
		}
		indentation++
	}

	return indentation
}
|
||||||
|
|
||||||
|
// printCodeLocationBlock prints the spec's context and returns the
// indentation at which follow-up details should be printed. Succinct output
// is collapsed to one line (plus the leaf code location) with details
// indented one level; verbose output indents details to the leaf's level.
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
	indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)

	if succinct {
		if len(componentTexts) > 0 {
			s.printNewLine()
			s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
		}
		s.printNewLine()
		indentation = 1
	} else {
		indentation--
	}

	return indentation
}
|
||||||
|
|
||||||
|
// orderedMeasurementKeys returns the measurement names sorted by their
// recorded Order index. Assumes Order values form a dense 0..len-1 range —
// a gap would leave empty-string keys and a duplicate would drop one;
// TODO confirm the producer guarantees this invariant.
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
	orderedKeys := make([]string, len(measurements))
	for key, measurement := range measurements {
		orderedKeys[measurement.Order] = key
	}
	return orderedKeys
}
|
||||||
|
|
||||||
|
// measurementReport renders a spec's measurements as a multi-line string.
// Succinct mode packs each measurement onto one line; verbose mode prints
// smallest/largest/average ± std deviation on separate lines.
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
	if len(spec.Measurements) == 0 {
		return "Found no measurements"
	}

	message := []string{}
	orderedKeys := s.orderedMeasurementKeys(spec.Measurements)

	if succinct {
		message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				measurement.SmallestLabel,
				s.colorize(greenColor, "%.3f", measurement.Smallest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, "%.3f", measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, "%.3f", measurement.Largest),
				measurement.Units,
			))
		}
	} else {
		message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			// NOTE(review): info is never assigned, so the %s it fills below is
			// always empty while measurement.Info is appended as its own line
			// instead — looks like a latent bug; confirm upstream whether info
			// was meant to carry the formatted Info value.
			info := ""
			if measurement.Info != nil {
				message = append(message, fmt.Sprintf("%v", measurement.Info))
			}

			message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				info,
				measurement.SmallestLabel,
				s.colorize(greenColor, "%.3f", measurement.Smallest),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, "%.3f", measurement.Largest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, "%.3f", measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
				measurement.Units,
			))
		}
	}

	return strings.Join(message, "\n")
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue