lnrpc: making PendingChannels req/resp naming scheme consistent

In this commit we rename the lnrpc.PendingChannelRequest and
lnrpc.PendingChannelResponse to
lnrpc.PendingChannelsRequest/lnrpc.PendingChannelsResponse. We do this
as we strive to ensure that the naming scheme across the RPC interface
is consistent.
This commit is contained in:
Olaoluwa Osuntokun 2018-01-04 14:20:25 -06:00
parent 445e11db5c
commit 7421584341
No known key found for this signature in database
GPG Key ID: 964EA263DD637C21
7 changed files with 426 additions and 421 deletions

View File

@ -901,7 +901,7 @@ func pendingChannels(ctx *cli.Context) error {
client, cleanUp := getClient(ctx)
defer cleanUp()
req := &lnrpc.PendingChannelRequest{}
req := &lnrpc.PendingChannelsRequest{}
resp, err := client.PendingChannels(ctxb, req)
if err != nil {
return err

View File

@ -211,7 +211,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest,
// If we didn't force close the transaction, at this point, the channel
// should now be marked as being in the state of "pending close".
if !force {
pendingChansRequest := &lnrpc.PendingChannelRequest{}
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(ctx, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
@ -247,7 +247,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest,
// node's channels that are currently in a pending state (with a broadcast, but
// not confirmed funding transaction).
func numOpenChannelsPending(ctxt context.Context, node *lntest.HarnessNode) (int, error) {
pendingChansRequest := &lnrpc.PendingChannelRequest{}
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
resp, err := node.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
return 0, err
@ -985,11 +985,11 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
// findForceClosedChannel searches a pending channel response for a particular
// channel, returning the force closed channel upon success.
func findForceClosedChannel(t *harnessTest,
pendingChanResp *lnrpc.PendingChannelResponse,
op *wire.OutPoint) *lnrpc.PendingChannelResponse_ForceClosedChannel {
pendingChanResp *lnrpc.PendingChannelsResponse,
op *wire.OutPoint) *lnrpc.PendingChannelsResponse_ForceClosedChannel {
var found bool
var forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel
var forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel
for _, forceClose = range pendingChanResp.PendingForceClosingChannels {
if forceClose.Channel.ChannelPoint == op.String() {
found = true
@ -1004,7 +1004,7 @@ func findForceClosedChannel(t *harnessTest,
}
func assertCommitmentMaturity(t *harnessTest,
forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel,
forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
maturityHeight uint32, blocksTilMaturity int32) {
if forceClose.MaturityHeight != maturityHeight {
@ -1022,7 +1022,7 @@ func assertCommitmentMaturity(t *harnessTest,
// assertPendingChannelNumHtlcs verifies that a force closed channel has the
// proper number of htlcs.
func assertPendingChannelNumHtlcs(t *harnessTest,
forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel,
forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
expectedNumHtlcs int) {
if len(forceClose.PendingHtlcs) != expectedNumHtlcs {
@ -1035,7 +1035,7 @@ func assertPendingChannelNumHtlcs(t *harnessTest,
// assertNumForceClosedChannels checks that a pending channel response has the
// expected number of force closed channels.
func assertNumForceClosedChannels(t *harnessTest,
pendingChanResp *lnrpc.PendingChannelResponse, expectedNumChans int) {
pendingChanResp *lnrpc.PendingChannelsResponse, expectedNumChans int) {
if len(pendingChanResp.PendingForceClosingChannels) != expectedNumChans {
t.Fatalf("expected to find %d force closed channels, got %d",
@ -1048,7 +1048,7 @@ func assertNumForceClosedChannels(t *harnessTest,
// belonging to a force closed channel, testing for the expected stage number,
// blocks till maturity, and the maturity height.
func assertPendingHtlcStageAndMaturity(t *harnessTest,
forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel,
forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
stage, maturityHeight uint32, blocksTillMaturity int32) {
for _, pendingHtlc := range forceClose.PendingHtlcs {
@ -1213,7 +1213,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
// Now that the channel has been force closed, it should show up in the
// PendingChannels RPC under the force close section.
pendingChansRequest := &lnrpc.PendingChannelRequest{}
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)

File diff suppressed because it is too large Load Diff

View File

@ -170,7 +170,7 @@ func request_Lightning_GetInfo_0(ctx context.Context, marshaler runtime.Marshale
}
func request_Lightning_PendingChannels_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PendingChannelRequest
var protoReq PendingChannelsRequest
var metadata runtime.ServerMetadata
msg, err := client.PendingChannels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))

View File

@ -202,7 +202,7 @@ service Lightning {
workflow and is waiting for confirmations for the funding txn, or is in the
process of closure, either initiated cooperatively or non-cooperatively.
*/
rpc PendingChannels (PendingChannelRequest) returns (PendingChannelResponse) {
rpc PendingChannels (PendingChannelsRequest) returns (PendingChannelsResponse) {
option (google.api.http) = {
get: "/v1/channels/pending"
};
@ -913,8 +913,8 @@ message PendingHTLC {
uint32 stage = 6 [ json_name = "stage" ];
}
message PendingChannelRequest {}
message PendingChannelResponse {
message PendingChannelsRequest {}
message PendingChannelsResponse {
message PendingChannel {
string remote_node_pub = 1 [ json_name = "remote_node_pub" ];
string channel_point = 2 [ json_name = "channel_point" ];

View File

@ -109,7 +109,7 @@
"200": {
"description": "",
"schema": {
"$ref": "#/definitions/lnrpcPendingChannelResponse"
"$ref": "#/definitions/lnrpcPendingChannelsResponse"
}
}
},
@ -696,11 +696,11 @@
}
},
"definitions": {
"PendingChannelResponseClosedChannel": {
"PendingChannelsResponseClosedChannel": {
"type": "object",
"properties": {
"channel": {
"$ref": "#/definitions/PendingChannelResponsePendingChannel",
"$ref": "#/definitions/PendingChannelsResponsePendingChannel",
"title": "/ The pending channel to be closed"
},
"closing_txid": {
@ -709,11 +709,11 @@
}
}
},
"PendingChannelResponseForceClosedChannel": {
"PendingChannelsResponseForceClosedChannel": {
"type": "object",
"properties": {
"channel": {
"$ref": "#/definitions/PendingChannelResponsePendingChannel",
"$ref": "#/definitions/PendingChannelsResponsePendingChannel",
"title": "/ The pending channel to be force closed"
},
"closing_txid": {
@ -748,7 +748,7 @@
}
}
},
"PendingChannelResponsePendingChannel": {
"PendingChannelsResponsePendingChannel": {
"type": "object",
"properties": {
"remote_node_pub": {
@ -771,11 +771,11 @@
}
}
},
"PendingChannelResponsePendingOpenChannel": {
"PendingChannelsResponsePendingOpenChannel": {
"type": "object",
"properties": {
"channel": {
"$ref": "#/definitions/PendingChannelResponsePendingChannel",
"$ref": "#/definitions/PendingChannelsResponsePendingChannel",
"title": "/ The pending channel"
},
"confirmation_height": {
@ -1746,7 +1746,7 @@
}
}
},
"lnrpcPendingChannelResponse": {
"lnrpcPendingChannelsResponse": {
"type": "object",
"properties": {
"total_limbo_balance": {
@ -1757,21 +1757,21 @@
"pending_open_channels": {
"type": "array",
"items": {
"$ref": "#/definitions/PendingChannelResponsePendingOpenChannel"
"$ref": "#/definitions/PendingChannelsResponsePendingOpenChannel"
},
"title": "/ Channels pending opening"
},
"pending_closing_channels": {
"type": "array",
"items": {
"$ref": "#/definitions/PendingChannelResponseClosedChannel"
"$ref": "#/definitions/PendingChannelsResponseClosedChannel"
},
"title": "/ Channels pending closing"
},
"pending_force_closing_channels": {
"type": "array",
"items": {
"$ref": "#/definitions/PendingChannelResponseForceClosedChannel"
"$ref": "#/definitions/PendingChannelsResponseForceClosedChannel"
},
"title": "/ Channels pending force closing"
}

View File

@ -1257,7 +1257,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context,
// workflow and is waiting for confirmations for the funding txn, or is in the
// process of closure, either initiated cooperatively or non-cooperatively.
func (r *rpcServer) PendingChannels(ctx context.Context,
in *lnrpc.PendingChannelRequest) (*lnrpc.PendingChannelResponse, error) {
in *lnrpc.PendingChannelsRequest) (*lnrpc.PendingChannelsResponse, error) {
// Check macaroon to see if this is allowed.
if r.authSvc != nil {
@ -1274,7 +1274,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
rpcsLog.Debugf("[pendingchannels]")
resp := &lnrpc.PendingChannelResponse{}
resp := &lnrpc.PendingChannelsResponse{}
// First, we'll populate the response with all the channels that are
// soon to be opened. We can easily fetch this data from the database
@ -1283,7 +1283,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
if err != nil {
return nil, err
}
resp.PendingOpenChannels = make([]*lnrpc.PendingChannelResponse_PendingOpenChannel,
resp.PendingOpenChannels = make([]*lnrpc.PendingChannelsResponse_PendingOpenChannel,
len(pendingOpenChannels))
for i, pendingChan := range pendingOpenChannels {
pub := pendingChan.IdentityPub.SerializeCompressed()
@ -1302,8 +1302,8 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
targetConfHeight := pendingChan.FundingBroadcastHeight + uint32(pendingChan.NumConfsRequired)
blocksTillOpen := int32(targetConfHeight) - bestHeight
resp.PendingOpenChannels[i] = &lnrpc.PendingChannelResponse_PendingOpenChannel{
Channel: &lnrpc.PendingChannelResponse_PendingChannel{
resp.PendingOpenChannels[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{
Channel: &lnrpc.PendingChannelsResponse_PendingChannel{
RemoteNodePub: hex.EncodeToString(pub),
ChannelPoint: pendingChan.FundingOutpoint.String(),
Capacity: int64(pendingChan.Capacity),
@ -1334,7 +1334,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// needed regardless of how this channel was closed.
pub := pendingClose.RemotePub.SerializeCompressed()
chanPoint := pendingClose.ChanPoint
channel := &lnrpc.PendingChannelResponse_PendingChannel{
channel := &lnrpc.PendingChannelsResponse_PendingChannel{
RemoteNodePub: hex.EncodeToString(pub),
ChannelPoint: chanPoint.String(),
Capacity: int64(pendingClose.Capacity),
@ -1350,7 +1350,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
case channeldb.CooperativeClose:
resp.PendingClosingChannels = append(
resp.PendingClosingChannels,
&lnrpc.PendingChannelResponse_ClosedChannel{
&lnrpc.PendingChannelsResponse_ClosedChannel{
Channel: channel,
ClosingTxid: closeTXID,
},
@ -1361,7 +1361,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// If the channel was force closed, then we'll need to query
// the utxoNursery for additional information.
case channeldb.ForceClose:
forceClose := &lnrpc.PendingChannelResponse_ForceClosedChannel{
forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
Channel: channel,
ClosingTxid: closeTXID,
}