From 2f25c9a0d8f38b9deab4d0539bb594cd3e41e95b Mon Sep 17 00:00:00 2001 From: Federico Kunze <31522760+fedekunze@users.noreply.github.com> Date: Mon, 7 Sep 2020 14:46:48 +0200 Subject: [PATCH] x/ibc: solo machine signature format (#7237) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * x/ibc: solo machine signature format * update tests and verification * diversifier updates * update tests * fix lint * Update x/ibc/light-clients/solomachine/types/header_test.go Co-authored-by: Aditya * Apply suggestions from code review Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com> * update test * misbehaviour sign bytes * consensus state tests * fix tests * more fixes Co-authored-by: Aditya Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com> --- client/grpc-gateway/swagger.json | 713 ++++- client/grpc/reflection/reflection.pb.gw.go | 1 - client/grpc/simulate/simulate.pb.gw.go | 1 - .../solomachine/v1/solomachine.proto | 116 +- x/auth/types/query.pb.gw.go | 1 - x/bank/types/query.pb.gw.go | 1 - x/distribution/types/query.pb.gw.go | 1 - x/evidence/types/query.pb.gw.go | 1 - x/gov/types/query.pb.gw.go | 1 - x/ibc-transfer/types/query.pb.gw.go | 1 - x/ibc/02-client/types/codec_test.go | 73 +- x/ibc/02-client/types/msgs_test.go | 18 +- x/ibc/02-client/types/proposal_test.go | 26 +- x/ibc/02-client/types/query.pb.gw.go | 1 - x/ibc/03-connection/types/msgs_test.go | 4 +- x/ibc/03-connection/types/query.pb.gw.go | 1 - x/ibc/04-channel/types/query.pb.gw.go | 1 - .../solomachine/types/client_state.go | 44 +- .../solomachine/types/client_state_test.go | 33 +- .../solomachine/types/consensus_state.go | 5 + .../solomachine/types/consensus_state_test.go | 32 +- .../light-clients/solomachine/types/header.go | 10 + .../solomachine/types/header_test.go | 46 +- .../solomachine/types/misbehaviour_handle.go | 25 +- .../types/misbehaviour_handle_test.go | 38 +- .../light-clients/solomachine/types/proof.go | 298 +- .../solomachine/types/solomachine.pb.go | 2471 ++++++++++++++++- .../solomachine/types/solomachine_test.go | 2 +- .../light-clients/solomachine/types/update.go | 16 +- .../solomachine/types/update_test.go | 25 +- x/ibc/testing/chain.go | 3 +- x/ibc/testing/solomachine.go | 97 +- x/mint/types/query.pb.gw.go | 1 - x/params/types/proposal/query.pb.gw.go | 1 - x/slashing/types/query.pb.gw.go | 1 - x/staking/types/query.pb.gw.go | 1 - x/upgrade/types/query.pb.gw.go | 1 - 37 files changed, 3650 insertions(+), 461 deletions(-) diff --git a/client/grpc-gateway/swagger.json b/client/grpc-gateway/swagger.json index 3bf32f7df..f99d7fed2 100644 --- a/client/grpc-gateway/swagger.json +++ b/client/grpc-gateway/swagger.json @@ -5630,9 +5630,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in 
certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryChannelsResponse is the response type for the Query/Channels RPC method." @@ -5790,9 +5802,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryChannelResponse is the response type for the Query/Channel RPC method.\nBesides the Channel end, it includes a proof and the height from which the\nproof was retrieved." @@ -5891,7 +5915,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." 
+ "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "proof": { "type": "string", @@ -5903,9 +5927,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryChannelClientStateResponse is the Response type for the\nQuery/QueryChannelClientState RPC method" @@ -5969,7 +6005,7 @@ ] } }, - "/ibc/channel/v1beta1/channels/{channel_id}/ports/{port_id}/consensus_state/{height}": { + "/ibc/channel/v1beta1/channels/{channel_id}/ports/{port_id}/consensus_state/epoch/{epoch_number}/height/{epoch_height}": { "get": { "summary": "ChannelConsensusState queries for the consensus state for the channel\nassociated with the provided channel identifiers.", "operationId": "ChannelConsensusState", @@ -6009,9 +6045,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryChannelClientStateResponse is the Response type for the\nQuery/QueryChannelClientState RPC method" @@ -6070,8 +6118,16 @@ "type": "string" }, { - "name": "height", - "description": "height of the consensus state", + "name": "epoch_number", + "description": "epoch number of the consensus state", + "in": "path", + "required": true, + "type": "string", + "format": "uint64" + }, + { + "name": "epoch_height", + "description": "epoch height of the consensus state", "in": "path", "required": true, "type": "string", @@ -6108,9 +6164,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QuerySequenceResponse is the request type for the\nQuery/QueryNextSequenceReceiveResponse RPC method" @@ -6199,9 +6267,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryPacketAcknowledgementResponse defines the client query response for a\npacket which also includes a proof, its path and the height form which the\nproof was retrieved" @@ -6328,9 +6408,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryPacketCommitmentsResponse is the request type for the\nQuery/QueryPacketCommitments RPC method" @@ -6445,9 +6537,21 @@ "title": "list of unrelayed packet sequences" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryUnrelayedPacketsResponse is the request type for the\nQuery/UnrelayedPacketCommitments RPC method" @@ -6557,9 +6661,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryPacketCommitmentResponse defines the client query response for a packet\nwhich also includes a proof, its path and the height form which the proof was\nretrieved" @@ -6726,9 +6842,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConnectionChannelsResponse is the Response type for the\nQuery/QueryConnectionChannels RPC method" @@ -6853,7 +6981,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." + "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "description": "list of stored ClientStates of the chain." }, @@ -6990,9 +7118,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryClientStateResponse is the response type for the Query/ClientState RPC\nmethod. Besides the client state, it includes a proof and the height from\nwhich the proof was retrieved." @@ -7182,7 +7322,7 @@ ] } }, - "/ibc/client/v1beta1/consensus_states/{client_id}/{height}": { + "/ibc/client/v1beta1/consensus_states/{client_id}/epoch/{epoch_number}/height/{epoch_height}": { "get": { "summary": "ConsensusState queries a consensus state associated with a client state at a given height.", "operationId": "ConsensusState", @@ -7218,9 +7358,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConsensusStateResponse is the response type for the Query/ConsensusState RPC method" @@ -7272,8 +7424,16 @@ "type": "string" }, { - "name": "height", - "description": "consensus state height", + "name": "epoch_number", + "description": "consensus state epoch number", + "in": "path", + "required": true, + "type": "string", + "format": "uint64" + }, + { + "name": "epoch_height", + "description": "consensus state epoch height", "in": "path", "required": true, "type": "string", @@ -7320,9 +7480,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was generated" + "title": "height at which the proof was generated", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryClientConnectionsResponse is the response type for the\nQuery/ClientConnections RPC method" @@ -7467,9 +7639,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryConnectionsResponse is the response type for the Query/Connections RPC\nmethod." @@ -7624,9 +7808,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryConnectionResponse is the response type for the Query/Connection RPC\nmethod. Besides the connection end, it includes a proof and the height from\nwhich the proof was retrieved." @@ -7718,7 +7914,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." + "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "proof": { "type": "string", @@ -7730,9 +7926,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConnectionClientStateResponse is the response type for the\nQuery/ConnectionClientState RPC method" @@ -7789,7 +7997,7 @@ ] } }, - "/ibc/connection/v1beta1/connections/{connection_id}/consensus_state": { + "/ibc/connection/v1beta1/connections/{connection_id}/consensus_state/epoch/{epoch_number}/height/{epoch_height}": { "get": { "summary": "ConnectionConsensusState queries the consensus state associated with the\nconnection.", "operationId": "ConnectionConsensusState", @@ -7829,9 +8037,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConnectionConsensusStateResponse is the response type for the\nQuery/ConnectionConsensusState RPC method" @@ -7883,9 +8103,16 @@ "type": "string" }, { - "name": "height", - "in": "query", - "required": false, + "name": "epoch_number", + "in": "path", + "required": true, + "type": "string", + "format": "uint64" + }, + { + "name": "epoch_height", + "in": "path", + "required": true, "type": "string", "format": "uint64" } @@ -7964,13 +8191,16 @@ "type": "object", "properties": { "type_url": { - "type": "string" + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." 
}, "value": { "type": "string", - "format": "byte" + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." } - } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } } } @@ -8065,13 +8295,16 @@ "type": "object", "properties": { "type_url": { - "type": "string" + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. 
(Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." }, "value": { "type": "string", - "format": "byte" + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." } - } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } } } @@ -8143,13 +8376,16 @@ "type": "object", "properties": { "type_url": { - "type": "string" + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. 
However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." }, "value": { "type": "string", - "format": "byte" + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." } - } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } } } @@ -12079,7 +12315,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." + "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "proof": { "type": "string", @@ -12091,9 +12327,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryChannelClientStateResponse is the Response type for the\nQuery/QueryChannelClientState RPC method" @@ -12131,9 +12379,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryChannelClientStateResponse is the Response type for the\nQuery/QueryChannelClientState RPC method" @@ -12207,9 +12467,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryChannelResponse is the response type for the Query/Channel RPC method.\nBesides the Channel end, it includes a proof and the height from which the\nproof was retrieved." @@ -12302,9 +12574,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryChannelsResponse is the response type for the Query/Channels RPC method." 
@@ -12397,9 +12681,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConnectionChannelsResponse is the Response type for the\nQuery/QueryConnectionChannels RPC method" @@ -12422,9 +12718,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QuerySequenceResponse is the request type for the\nQuery/QueryNextSequenceReceiveResponse RPC method" @@ -12447,9 +12755,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryPacketAcknowledgementResponse defines the client query response for a\npacket which also includes a proof, its path and the height form which the\nproof was retrieved" @@ -12472,9 +12792,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryPacketCommitmentResponse defines the client query response for a packet\nwhich also includes a proof, its path and the height form which the proof was\nretrieved" @@ -12527,9 +12859,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryPacketCommitmentsResponse is the request type for the\nQuery/QueryPacketCommitments RPC method" @@ -12546,9 +12890,21 @@ "title": "list of unrelayed packet sequences" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryUnrelayedPacketsResponse is the request type for the\nQuery/UnrelayedPacketCommitments RPC method" @@ -12565,6 +12921,23 @@ "default": "STATE_UNINITIALIZED_UNSPECIFIED", "description": "State defines if a channel is in one of the following states:\nCLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.\n\n - STATE_UNINITIALIZED_UNSPECIFIED: Default State\n - STATE_INIT: A channel has just started the opening handshake.\n - STATE_TRYOPEN: A channel has acknowledged the handshake step on the counterparty chain.\n - STATE_OPEN: A channel has completed the handshake. Open channels are\nready to send and receive packets.\n - STATE_CLOSED: A channel has been closed and can no longer be used to send or receive\npackets." }, + "ibc.client.Height": { + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset", + "title": "Height is a monotonically increasing data type\nthat can be compared against another Height for the purposes of updating and\nfreezing clients" + }, "ibc.client.IdentifiedClientState": { "type": "object", "properties": { @@ -12589,7 +12962,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." + "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "ibc.client.QueryClientStateResponse": { "type": "object", @@ -12620,9 +12993,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryClientStateResponse is the response type for the Query/ClientState RPC\nmethod. Besides the client state, it includes a proof and the height from\nwhich the proof was retrieved." @@ -12656,7 +13041,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." + "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "description": "list of stored ClientStates of the chain." }, @@ -12709,9 +13094,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConsensusStateResponse is the response type for the Query/ConsensusState RPC method" @@ -12920,9 +13317,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was generated" + "title": "height at which the proof was generated", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryClientConnectionsResponse is the response type for the\nQuery/ClientConnections RPC method" @@ -12955,7 +13364,7 @@ "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := &pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" } }, - "description": "IdentifiedClientState defines a client state with additional client identifier field." + "description": "IdentifiedClientState defines a client state with additional client\nidentifier field." }, "proof": { "type": "string", @@ -12967,9 +13376,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConnectionClientStateResponse is the response type for the\nQuery/ConnectionClientState RPC method" @@ -13007,9 +13428,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "title": "QueryConnectionConsensusStateResponse is the response type for the\nQuery/ConnectionConsensusState RPC method" @@ -13080,9 +13513,21 @@ "title": "merkle proof path" }, "proof_height": { - "type": "string", - "format": "uint64", - "title": "height at which the proof was retrieved" + "title": "height at which the proof was retrieved", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryConnectionResponse is the response type for the Query/Connection RPC\nmethod. Besides the connection end, it includes a proof and the height from\nwhich the proof was retrieved." @@ -13168,9 +13613,21 @@ "description": "PageResponse is to be embedded in gRPC response messages where the corresponding\nrequest message has used PageRequest.\n\n message SomeResponse {\n repeated Bar results = 1;\n PageResponse page = 2;\n }" }, "height": { - "type": "string", - "format": "int64", - "title": "query block height" + "title": "query block height", + "type": "object", + "properties": { + "epoch_number": { + "type": "string", + "format": "uint64", + "title": "the epoch that the client is currently on" + }, + "epoch_height": { + "type": "string", + "format": "uint64", + "title": "the height within the given epoch" + } + }, + "description": "Normally the EpochHeight is incremented at each height while keeping epoch\nnumber the same However some consensus algorithms may choose to reset the\nheight in certain conditions e.g. 
hard forks, state-machine breaking changes\nIn these cases, the epoch number is incremented so that height continues to\nbe monitonically increasing even as the EpochHeight gets reset" } }, "description": "QueryConnectionsResponse is the response type for the Query/Connections RPC\nmethod." diff --git a/client/grpc/reflection/reflection.pb.gw.go b/client/grpc/reflection/reflection.pb.gw.go index ab486750e..b3e8c4641 100644 --- a/client/grpc/reflection/reflection.pb.gw.go +++ b/client/grpc/reflection/reflection.pb.gw.go @@ -106,7 +106,6 @@ func local_request_ReflectionService_ListImplementations_0(ctx context.Context, // RegisterReflectionServiceHandlerServer registers the http handlers for service ReflectionService to "mux". // UnaryRPC :call ReflectionServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterReflectionServiceHandlerFromEndpoint instead. func RegisterReflectionServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ReflectionServiceServer) error { mux.Handle("GET", pattern_ReflectionService_ListAllInterfaces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/client/grpc/simulate/simulate.pb.gw.go b/client/grpc/simulate/simulate.pb.gw.go index 78cb85437..733449d03 100644 --- a/client/grpc/simulate/simulate.pb.gw.go +++ b/client/grpc/simulate/simulate.pb.gw.go @@ -70,7 +70,6 @@ func local_request_SimulateService_Simulate_0(ctx context.Context, marshaler run // RegisterSimulateServiceHandlerServer registers the http handlers for service SimulateService to "mux". // UnaryRPC :call SimulateServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterSimulateServiceHandlerFromEndpoint instead. func RegisterSimulateServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SimulateServiceServer) error { mux.Handle("POST", pattern_SimulateService_Simulate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/proto/ibc/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v1/solomachine.proto index a6463ef75..3c8c7a2f9 100644 --- a/proto/ibc/lightclients/solomachine/v1/solomachine.proto +++ b/proto/ibc/lightclients/solomachine/v1/solomachine.proto @@ -4,21 +4,21 @@ package ibc.lightclients.solomachine.v1; option go_package = "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"; import "cosmos/base/crypto/v1beta1/crypto.proto"; +import "ibc/connection/connection.proto"; +import "ibc/channel/channel.proto"; import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; // ClientState defines a solo machine client that tracks the current consensus // state and if the client is frozen. 
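For context on the swagger changes above: the flat int64/uint64 heights in the query responses are replaced by the ibc.client.Height object. Below is a minimal Go sketch of that shape and an epoch-aware comparison, assuming only the two fields the schema describes; the actual generated type lives in the 02-client package and is not reproduced here.

package sketch

// Height mirrors the ibc.client.Height schema from the swagger above: an
// (epoch_number, epoch_height) pair that stays monotonically increasing
// even when the height within an epoch is reset, e.g. on a hard fork.
type Height struct {
	EpochNumber uint64 // the epoch that the client is currently on
	EpochHeight uint64 // the height within the given epoch
}

// GT reports whether h is strictly greater than other, comparing the epoch
// number first and the height within the epoch second.
func (h Height) GT(other Height) bool {
	if h.EpochNumber != other.EpochNumber {
		return h.EpochNumber > other.EpochNumber
	}
	return h.EpochHeight > other.EpochHeight
}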
message ClientState { option (gogoproto.goproto_getters) = false; // frozen sequence of the solo machine - uint64 frozen_sequence = 1 - [(gogoproto.moretags) = "yaml:\"frozen_sequence\""]; - ConsensusState consensus_state = 2 - [(gogoproto.moretags) = "yaml:\"consensus_state\""]; + uint64 frozen_sequence = 1 [(gogoproto.moretags) = "yaml:\"frozen_sequence\""]; + ConsensusState consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""]; // when set to true, will allow governance to update a solo machine client. // The client will be unfrozen if it is frozen. - bool allow_update_after_proposal = 3 - [(gogoproto.moretags) = "yaml:\"allow_update_after_proposal\""]; + bool allow_update_after_proposal = 3 [(gogoproto.moretags) = "yaml:\"allow_update_after_proposal\""]; } // ConsensusState defines a solo machine consensus state @@ -27,19 +27,22 @@ message ConsensusState { // current sequence of the consensus state uint64 sequence = 1; // public key of the solo machine - cosmos.base.crypto.v1beta1.PublicKey public_key = 2 - [(gogoproto.moretags) = "yaml:\"public_key\""]; - uint64 timestamp = 3; + cosmos.base.crypto.v1beta1.PublicKey public_key = 2 [(gogoproto.moretags) = "yaml:\"public_key\""]; + // diversifier allows the same public key to be re-used across different solo machine clients + // (potentially on different chains) without being considered misbehaviour. + string diversifier = 3; + uint64 timestamp = 4; } // Header defines a solo machine consensus header message Header { option (gogoproto.goproto_getters) = false; // sequence to update solo machine public key at - uint64 sequence = 1; - bytes signature = 2; - cosmos.base.crypto.v1beta1.PublicKey new_public_key = 3 - [(gogoproto.moretags) = "yaml:\"new_public_key\""]; + uint64 sequence = 1; + uint64 timestamp = 2; + bytes signature = 3; + cosmos.base.crypto.v1beta1.PublicKey new_public_key = 4 [(gogoproto.moretags) = "yaml:\"new_public_key\""]; + string new_diversifier = 5 [(gogoproto.moretags) = "yaml:\"new_diversifier\""]; } // Misbehaviour defines misbehaviour for a solo machine which consists @@ -47,12 +50,10 @@ message Header { message Misbehaviour { option (gogoproto.goproto_getters) = false; option (gogoproto.goproto_stringer) = false; - string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""]; - uint64 sequence = 2; - SignatureAndData signature_one = 3 - [(gogoproto.moretags) = "yaml:\"signature_one\""]; - SignatureAndData signature_two = 4 - [(gogoproto.moretags) = "yaml:\"signature_two\""]; + string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""]; + uint64 sequence = 2; + SignatureAndData signature_one = 3 [(gogoproto.moretags) = "yaml:\"signature_one\""]; + SignatureAndData signature_two = 4 [(gogoproto.moretags) = "yaml:\"signature_two\""]; } // SignatureAndData contains a signature and the data signed over to create that @@ -71,3 +72,80 @@ message TimestampedSignature { uint64 timestamp = 2; } +// SignBytes defines the signed bytes used for signature verification. +message SignBytes { + option (gogoproto.goproto_getters) = false; + + uint64 sequence = 1; + uint64 timestamp = 2; + string diversifier = 3; + // marshaled data + bytes data = 4; +} + +// HeaderData returns the SignBytes data for misbehaviour verification. 
+message HeaderData { + option (gogoproto.goproto_getters) = false; + + // header public key + cosmos.base.crypto.v1beta1.PublicKey new_pub_key = 1 [(gogoproto.moretags) = "yaml:\"new_pub_key\""]; + // header diversifier + string new_diversifier = 2 [(gogoproto.moretags) = "yaml:\"new_diversifier\""]; +} + +// ClientStateData returns the SignBytes data for client state verification. +message ClientStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""]; +} + +// ConsensusStateSignBytes returns the SignBytes data for consensus state verification. +message ConsensusStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""]; +} + +// ConnectionStateSignBytes returns the SignBytes data for connection state verification. +message ConnectionStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibc.connection.ConnectionEnd connection = 2; +} + +// ChannelStateSignBytes returns the SignBytes data for channel state verification. +message ChannelStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibc.channel.Channel channel = 2; +} + +// PacketCommitmentSignBytes returns the SignBytes data for packet commitment verification. +message PacketCommitmentData { + bytes path = 1; + bytes commitment = 2; +} + +// PacketAcknowledgementSignBytes returns the SignBytes data for acknowledgement verification. +message PacketAcknowledgementData { + bytes path = 1; + bytes acknowledgement = 2; +} + +// PacketAcknowledgementAbsenceSignBytes returns the SignBytes data for acknowledgement absence +// verification. +message PacketAcknowledgementAbsenseData { + bytes path = 1; +} + +// NextSequenceRecv returns the SignBytes data for verification of the next +// sequence to be received. +message NextSequenceRecvData { + bytes path = 1; + uint64 next_seq_recv = 2 [(gogoproto.moretags) = "yaml:\"next_seq_recv\""]; +} \ No newline at end of file diff --git a/x/auth/types/query.pb.gw.go b/x/auth/types/query.pb.gw.go index 3b156842d..7e80fa6fa 100644 --- a/x/auth/types/query.pb.gw.go +++ b/x/auth/types/query.pb.gw.go @@ -106,7 +106,6 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Account_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/bank/types/query.pb.gw.go b/x/bank/types/query.pb.gw.go index ca9c1ddbf..85d776791 100644 --- a/x/bank/types/query.pb.gw.go +++ b/x/bank/types/query.pb.gw.go @@ -272,7 +272,6 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
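The SignBytes message above is the core of the new signature format: every proof the solo machine produces is a signature over the protobuf encoding of (sequence, timestamp, diversifier, data), where data is itself the marshaled path-scoped message (ClientStateData, ConnectionStateData, PacketCommitmentData, and so on). The sketch below is an illustrative reconstruction of that composition for a packet commitment, assuming the patch's PacketCommitmentSignBytes helper marshals the corresponding Data message into SignBytes.Data; the helper name and wiring here are not the patch's own code.

package solomachinesketch

import (
	"github.com/cosmos/cosmos-sdk/codec"

	"github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"
)

// packetCommitmentSignBytes shows how the proto messages above compose: the
// path-scoped value is wrapped in PacketCommitmentData, and that marshaled
// payload becomes the Data of the SignBytes the solo machine signs.
func packetCommitmentSignBytes(
	cdc codec.BinaryMarshaler,
	sequence, timestamp uint64, diversifier string,
	path, commitment []byte,
) ([]byte, error) {
	data, err := cdc.MarshalBinaryBare(&types.PacketCommitmentData{
		Path:       path,
		Commitment: commitment,
	})
	if err != nil {
		return nil, err
	}

	return cdc.MarshalBinaryBare(&types.SignBytes{
		Sequence:    sequence,
		Timestamp:   timestamp,
		Diversifier: diversifier,
		Data:        data,
	})
}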
-// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Balance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/distribution/types/query.pb.gw.go b/x/distribution/types/query.pb.gw.go index 890e51e40..b3a714f34 100644 --- a/x/distribution/types/query.pb.gw.go +++ b/x/distribution/types/query.pb.gw.go @@ -488,7 +488,6 @@ func local_request_Query_CommunityPool_0(ctx context.Context, marshaler runtime. // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/evidence/types/query.pb.gw.go b/x/evidence/types/query.pb.gw.go index e15d16352..e9346629c 100644 --- a/x/evidence/types/query.pb.gw.go +++ b/x/evidence/types/query.pb.gw.go @@ -124,7 +124,6 @@ func local_request_Query_AllEvidence_0(ctx context.Context, marshaler runtime.Ma // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Evidence_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/gov/types/query.pb.gw.go b/x/gov/types/query.pb.gw.go index e9fc72f30..4e2c61f5f 100644 --- a/x/gov/types/query.pb.gw.go +++ b/x/gov/types/query.pb.gw.go @@ -528,7 +528,6 @@ func local_request_Query_TallyResult_0(ctx context.Context, marshaler runtime.Ma // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Proposal_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/ibc-transfer/types/query.pb.gw.go b/x/ibc-transfer/types/query.pb.gw.go index 23b91aafc..9d5093a47 100644 --- a/x/ibc-transfer/types/query.pb.gw.go +++ b/x/ibc-transfer/types/query.pb.gw.go @@ -142,7 +142,6 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/ibc/02-client/types/codec_test.go b/x/ibc/02-client/types/codec_test.go index cab2a2b70..f6ba51b26 100644 --- a/x/ibc/02-client/types/codec_test.go +++ b/x/ibc/02-client/types/codec_test.go @@ -1,10 +1,6 @@ package types_test import ( - "testing" - - "github.com/stretchr/testify/require" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/x/ibc/02-client/types" ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/07-tendermint/types" @@ -20,7 +16,8 @@ type caseAny struct { expPass bool } -func TestPackClientState(t *testing.T) { +func (suite *TypesTestSuite) TestPackClientState() { + testCases := []struct { name string clientState exported.ClientState @@ -28,7 +25,7 @@ func TestPackClientState(t *testing.T) { }{ { "solo machine client", - ibctesting.NewSolomachine(t, "solomachine").ClientState(), + ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "").ClientState(), true, }, { @@ -53,9 +50,9 @@ func TestPackClientState(t *testing.T) { for _, tc := range testCases { clientAny, err := types.PackClientState(tc.clientState) if tc.expPass { - require.NoError(t, err, tc.name) + suite.Require().NoError(err, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) @@ -64,17 +61,15 @@ func TestPackClientState(t *testing.T) { for i, tc := range testCasesAny { cs, err := types.UnpackClientState(tc.any) if tc.expPass { - require.NoError(t, err, tc.name) - require.Equal(t, testCases[i].clientState, cs, tc.name) + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].clientState, cs, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } } } -func TestPackConsensusState(t *testing.T) { - chain := ibctesting.NewTestChain(t, "cosmoshub") - +func (suite *TypesTestSuite) TestPackConsensusState() { testCases := []struct { name string consensusState exported.ConsensusState @@ -82,12 +77,12 @@ func TestPackConsensusState(t *testing.T) { }{ { "solo machine consensus", - ibctesting.NewSolomachine(t, "solomachine").ConsensusState(), + ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "").ConsensusState(), true, }, { "tendermint consensus", - chain.LastHeader.ConsensusState(), + suite.chain.LastHeader.ConsensusState(), 
true, }, { @@ -102,9 +97,9 @@ func TestPackConsensusState(t *testing.T) { for _, tc := range testCases { clientAny, err := types.PackConsensusState(tc.consensusState) if tc.expPass { - require.NoError(t, err, tc.name) + suite.Require().NoError(err, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) } @@ -112,17 +107,15 @@ func TestPackConsensusState(t *testing.T) { for i, tc := range testCasesAny { cs, err := types.UnpackConsensusState(tc.any) if tc.expPass { - require.NoError(t, err, tc.name) - require.Equal(t, testCases[i].consensusState, cs, tc.name) + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].consensusState, cs, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } } } -func TestPackHeader(t *testing.T) { - chain := ibctesting.NewTestChain(t, "cosmoshub") - +func (suite *TypesTestSuite) TestPackHeader() { testCases := []struct { name string header exported.Header @@ -130,12 +123,12 @@ func TestPackHeader(t *testing.T) { }{ { "solo machine header", - ibctesting.NewSolomachine(t, "solomachine").CreateHeader(), + ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "").CreateHeader(), true, }, { "tendermint header", - chain.LastHeader, + suite.chain.LastHeader, true, }, { @@ -150,9 +143,9 @@ func TestPackHeader(t *testing.T) { for _, tc := range testCases { clientAny, err := types.PackHeader(tc.header) if tc.expPass { - require.NoError(t, err, tc.name) + suite.Require().NoError(err, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) @@ -161,17 +154,15 @@ func TestPackHeader(t *testing.T) { for i, tc := range testCasesAny { cs, err := types.UnpackHeader(tc.any) if tc.expPass { - require.NoError(t, err, tc.name) - require.Equal(t, testCases[i].header, cs, tc.name) + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].header, cs, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } } } -func TestPackMisbehaviour(t *testing.T) { - chain := ibctesting.NewTestChain(t, "cosmoshub") - +func (suite *TypesTestSuite) TestPackMisbehaviour() { testCases := []struct { name string misbehaviour exported.Misbehaviour @@ -179,12 +170,12 @@ func TestPackMisbehaviour(t *testing.T) { }{ { "solo machine misbehaviour", - ibctesting.NewSolomachine(t, "solomachine").CreateMisbehaviour(), + ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "").CreateMisbehaviour(), true, }, { "tendermint misbehaviour", - ibctmtypes.NewMisbehaviour("tendermint", chain.ChainID, chain.LastHeader, chain.LastHeader), + ibctmtypes.NewMisbehaviour("tendermint", suite.chain.ChainID, suite.chain.LastHeader, suite.chain.LastHeader), true, }, { @@ -199,9 +190,9 @@ func TestPackMisbehaviour(t *testing.T) { for _, tc := range testCases { clientAny, err := types.PackMisbehaviour(tc.misbehaviour) if tc.expPass { - require.NoError(t, err, tc.name) + suite.Require().NoError(err, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) @@ -210,10 +201,10 @@ func TestPackMisbehaviour(t *testing.T) { for i, tc := range testCasesAny { cs, err := types.UnpackMisbehaviour(tc.any) if tc.expPass { - require.NoError(t, err, tc.name) - require.Equal(t, 
testCases[i].misbehaviour, cs, tc.name) + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].misbehaviour, cs, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } } } diff --git a/x/ibc/02-client/types/msgs_test.go b/x/ibc/02-client/types/msgs_test.go index 4b9113084..8c7d60645 100644 --- a/x/ibc/02-client/types/msgs_test.go +++ b/x/ibc/02-client/types/msgs_test.go @@ -45,7 +45,7 @@ func (suite *TypesTestSuite) TestMarshalMsgCreateClient() { }{ { "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgCreateClient(soloMachine.ClientID, soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -145,7 +145,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { { "valid - solomachine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgCreateClient(soloMachine.ClientID, soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -154,7 +154,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { { "invalid solomachine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgCreateClient(soloMachine.ClientID, &solomachinetypes.ClientState{}, soloMachine.ConsensusState(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -163,7 +163,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { { "invalid solomachine consensus state", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgCreateClient(soloMachine.ClientID, soloMachine.ClientState(), &solomachinetypes.ConsensusState{}, suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -205,7 +205,7 @@ func (suite *TypesTestSuite) TestMarshalMsgUpdateClient() { }{ { "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -294,7 +294,7 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { { "valid - solomachine header", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -343,7 +343,7 @@ func (suite *TypesTestSuite) TestMarshalMsgSubmitMisbehaviour() { }{ { "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = 
types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -440,7 +440,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() { { "valid - solomachine misbehaviour", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), "solomachine") + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "") msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, @@ -457,7 +457,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() { { "client-id mismatch", func() { - soloMachineMisbehaviour := ibctesting.NewSolomachine(suite.T(), "solomachine").CreateMisbehaviour() + soloMachineMisbehaviour := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, "solomachine", "").CreateMisbehaviour() msg, err = types.NewMsgSubmitMisbehaviour("external", soloMachineMisbehaviour, suite.chain.SenderAccount.GetAddress()) suite.Require().NoError(err) }, diff --git a/x/ibc/02-client/types/proposal_test.go b/x/ibc/02-client/types/proposal_test.go index 09ae1adc5..f43f0e3af 100644 --- a/x/ibc/02-client/types/proposal_test.go +++ b/x/ibc/02-client/types/proposal_test.go @@ -1,32 +1,28 @@ package types_test import ( - "testing" - - "github.com/stretchr/testify/require" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" "github.com/cosmos/cosmos-sdk/x/ibc/02-client/types" ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/07-tendermint/types" ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" ) -func TestNewUpdateClientProposal(t *testing.T) { +func (suite *TypesTestSuite) TestNewUpdateClientProposal() { p, err := types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, clientID, &ibctmtypes.Header{}) - require.NoError(t, err) - require.NotNil(t, p) + suite.Require().NoError(err) + suite.Require().NotNil(p) p, err = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, clientID, nil) - require.Error(t, err) - require.Nil(t, p) + suite.Require().Error(err) + suite.Require().Nil(p) } -func TestValidateBasic(t *testing.T) { +func (suite *TypesTestSuite) TestValidateBasic() { // use solo machine header for testing - solomachine := ibctesting.NewSolomachine(t, clientID) + solomachine := ibctesting.NewSolomachine(suite.T(), suite.chain.Codec, clientID, "") smHeader := solomachine.CreateHeader() header, err := types.PackHeader(smHeader) - require.NoError(t, err) + suite.Require().NoError(err) // use a different pointer so we don't modify 'header' smInvalidHeader := solomachine.CreateHeader() @@ -35,7 +31,7 @@ func TestValidateBasic(t *testing.T) { smInvalidHeader.Sequence = 0 invalidHeader, err := types.PackHeader(smInvalidHeader) - require.NoError(t, err) + suite.Require().NoError(err) testCases := []struct { name string @@ -69,9 +65,9 @@ func TestValidateBasic(t *testing.T) { err := tc.proposal.ValidateBasic() if tc.expPass { - require.NoError(t, err, tc.name) + suite.Require().NoError(err, tc.name) } else { - require.Error(t, err, tc.name) + suite.Require().Error(err, tc.name) } } } diff --git a/x/ibc/02-client/types/query.pb.gw.go b/x/ibc/02-client/types/query.pb.gw.go index 85d7a07e3..3fd7e0efa 100644 --- a/x/ibc/02-client/types/query.pb.gw.go +++ b/x/ibc/02-client/types/query.pb.gw.go @@ -312,7 +312,6 @@ func local_request_Query_ConsensusStates_0(ctx context.Context, marshaler runtim // 
RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_ClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/ibc/03-connection/types/msgs_test.go b/x/ibc/03-connection/types/msgs_test.go index 5d0a0d84b..ccfc6a328 100644 --- a/x/ibc/03-connection/types/msgs_test.go +++ b/x/ibc/03-connection/types/msgs_test.go @@ -128,7 +128,7 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() { types.NewMsgConnectionOpenTry("ibcconntest", "clienttotesta", "ibc/test", "clienttotest", clientState, prefix, []string{ibctesting.ConnectionVersion}, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), types.NewMsgConnectionOpenTry("ibcconntest", "clienttotesta", "connectiontotest", "test/conn1", clientState, prefix, []string{ibctesting.ConnectionVersion}, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), types.NewMsgConnectionOpenTry("ibcconntest", "clienttotesta", "connectiontotest", "clienttotest", nil, prefix, []string{ibctesting.ConnectionVersion}, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), - &types.MsgConnectionOpenTry{"ibcconntest", "clienttotesta", invalidAny, counterparty, []string{ibctesting.ConnectionVersion}, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer}, + {"ibcconntest", "clienttotesta", invalidAny, counterparty, []string{ibctesting.ConnectionVersion}, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer}, types.NewMsgConnectionOpenTry("ibcconntest", "clienttotesta", "connectiontotest", "clienttotest", invalidClient, prefix, []string{ibctesting.ConnectionVersion}, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), types.NewMsgConnectionOpenTry("ibcconntest", "clienttotesta", "connectiontotest", "clienttotest", clientState, emptyPrefix, []string{ibctesting.ConnectionVersion}, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), types.NewMsgConnectionOpenTry("ibcconntest", "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []string{}, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), @@ -196,7 +196,7 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() { testMsgs := []*types.MsgConnectionOpenAck{ types.NewMsgConnectionOpenAck("test/conn1", clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), types.NewMsgConnectionOpenAck("ibcconntest", nil, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), - &types.MsgConnectionOpenAck{"ibcconntest", ibctesting.ConnectionVersion, invalidAny, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer}, + {"ibcconntest", ibctesting.ConnectionVersion, invalidAny, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer}, types.NewMsgConnectionOpenAck("ibcconntest", invalidClient, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, 
ibctesting.ConnectionVersion, signer), types.NewMsgConnectionOpenAck("ibcconntest", clientState, emptyProof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), types.NewMsgConnectionOpenAck("ibcconntest", clientState, suite.proof, emptyProof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), diff --git a/x/ibc/03-connection/types/query.pb.gw.go b/x/ibc/03-connection/types/query.pb.gw.go index 54c52c635..6e2f3323c 100644 --- a/x/ibc/03-connection/types/query.pb.gw.go +++ b/x/ibc/03-connection/types/query.pb.gw.go @@ -330,7 +330,6 @@ func local_request_Query_ConnectionConsensusState_0(ctx context.Context, marshal // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Connection_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/ibc/04-channel/types/query.pb.gw.go b/x/ibc/04-channel/types/query.pb.gw.go index 933cba2d7..1181eca5a 100644 --- a/x/ibc/04-channel/types/query.pb.gw.go +++ b/x/ibc/04-channel/types/query.pb.gw.go @@ -900,7 +900,6 @@ func local_request_Query_NextSequenceReceive_0(ctx context.Context, marshaler ru // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
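On the prover side, the solo machine (or a relayer acting for it) produces each proof by signing the marshaled SignBytes; the client_state.go hunks that follow rebuild the same bytes, now including the consensus state's diversifier, and check them with VerifySignature. A minimal signer sketch under those assumptions follows; the helper name is illustrative and not part of this patch.

package solomachinesketch

import (
	"github.com/cosmos/cosmos-sdk/codec"
	tmcrypto "github.com/tendermint/tendermint/crypto"

	"github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"
)

// signData signs the protobuf-encoded SignBytes with the solo machine's
// private key; the resulting signature is what the Verify* methods below
// check against the registered public key.
func signData(
	cdc codec.BinaryMarshaler, privKey tmcrypto.PrivKey,
	sequence, timestamp uint64, diversifier string, data []byte,
) ([]byte, error) {
	bz, err := cdc.MarshalBinaryBare(&types.SignBytes{
		Sequence:    sequence,
		Timestamp:   timestamp,
		Diversifier: diversifier,
		Data:        data,
	})
	if err != nil {
		return nil, err
	}
	return privKey.Sign(bz)
}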
func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Channel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/ibc/light-clients/solomachine/types/client_state.go b/x/ibc/light-clients/solomachine/types/client_state.go index aa2c03f22..4a900d2ce 100644 --- a/x/ibc/light-clients/solomachine/types/client_state.go +++ b/x/ibc/light-clients/solomachine/types/client_state.go @@ -83,12 +83,12 @@ func (cs ClientState) VerifyClientState( return err } - data, err := ClientStateSignBytes(cdc, sequence, signature.Timestamp, path, clientState) + signBz, err := ClientStateSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, clientState) if err != nil { return err } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -122,12 +122,12 @@ func (cs ClientState) VerifyClientConsensusState( return err } - data, err := ConsensusStateSignBytes(cdc, sequence, signature.Timestamp, path, consensusState) + signBz, err := ConsensusStateSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, consensusState) if err != nil { return err } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -158,12 +158,12 @@ func (cs ClientState) VerifyConnectionState( return err } - data, err := ConnectionStateSignBytes(cdc, sequence, signature.Timestamp, path, connectionEnd) + signBz, err := ConnectionStateSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, connectionEnd) if err != nil { return err } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -195,12 +195,12 @@ func (cs ClientState) VerifyChannelState( return err } - data, err := ChannelStateSignBytes(cdc, sequence, signature.Timestamp, path, channel) + signBz, err := ChannelStateSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, channel) if err != nil { return err } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -233,9 +233,12 @@ func (cs ClientState) VerifyPacketCommitment( return err } - data := PacketCommitmentSignBytes(sequence, signature.Timestamp, path, commitmentBytes) + signBz, err := PacketCommitmentSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, commitmentBytes) + if err != nil { + return err + } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -268,9 +271,12 @@ func (cs ClientState) VerifyPacketAcknowledgement( return err } - data := PacketAcknowledgementSignBytes(sequence, signature.Timestamp, path, acknowledgement) + signBz, err := PacketAcknowledgementSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, acknowledgement) + if err 
!= nil { + return err + } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -303,9 +309,12 @@ func (cs ClientState) VerifyPacketAcknowledgementAbsence( return err } - data := PacketAcknowledgementAbsenceSignBytes(sequence, signature.Timestamp, path) + signBz, err := PacketAcknowledgementAbsenceSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path) + if err != nil { + return err + } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } @@ -337,9 +346,12 @@ func (cs ClientState) VerifyNextSequenceRecv( return err } - data := NextSequenceRecvSignBytes(sequence, signature.Timestamp, path, nextSequenceRecv) + signBz, err := NextSequenceRecvSignBytes(cdc, sequence, signature.Timestamp, cs.ConsensusState.Diversifier, path, nextSequenceRecv) + if err != nil { + return err + } - if err := VerifySignature(cs.ConsensusState.GetPubKey(), data, signature.Signature); err != nil { + if err := VerifySignature(cs.ConsensusState.GetPubKey(), signBz, signature.Signature); err != nil { return err } diff --git a/x/ibc/light-clients/solomachine/types/client_state_test.go b/x/ibc/light-clients/solomachine/types/client_state_test.go index 16b5ffe54..1b9c6ad36 100644 --- a/x/ibc/light-clients/solomachine/types/client_state_test.go +++ b/x/ibc/light-clients/solomachine/types/client_state_test.go @@ -40,17 +40,22 @@ func (suite *SoloMachineTestSuite) TestClientStateValidateBasic() { }, { "sequence is zero", - types.NewClientState(&types.ConsensusState{0, suite.solomachine.ConsensusState().PublicKey, suite.solomachine.Time}, false), + types.NewClientState(&types.ConsensusState{0, suite.solomachine.ConsensusState().PublicKey, suite.solomachine.Diversifier, suite.solomachine.Time}, false), false, }, { - "timstamp is zero", - types.NewClientState(&types.ConsensusState{1, suite.solomachine.ConsensusState().PublicKey, 0}, false), + "timestamp is zero", + types.NewClientState(&types.ConsensusState{1, suite.solomachine.ConsensusState().PublicKey, suite.solomachine.Diversifier, 0}, false), + false, + }, + { + "diversifier is blank", + types.NewClientState(&types.ConsensusState{1, suite.solomachine.ConsensusState().PublicKey, " ", 1}, false), false, }, { "pubkey is empty", - types.NewClientState(&types.ConsensusState{suite.solomachine.Sequence, nil, suite.solomachine.Time}, false), + types.NewClientState(&types.ConsensusState{suite.solomachine.Sequence, nil, suite.solomachine.Diversifier, suite.solomachine.Time}, false), false, }, } @@ -80,7 +85,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientState() { path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) suite.Require().NoError(err) - value, err := types.ClientStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, path, clientState) + value, err := types.ClientStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, clientState) suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) @@ -205,7 +210,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() { path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) suite.Require().NoError(err) - value, 
err := types.ConsensusStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, path, consensusState) + value, err := types.ConsensusStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, consensusState) suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) @@ -326,7 +331,7 @@ func (suite *SoloMachineTestSuite) TestVerifyConnectionState() { path, err := commitmenttypes.ApplyPrefix(prefix, host.ConnectionPath(testConnectionID)) suite.Require().NoError(err) - value, err := types.ConnectionStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, path, conn) + value, err := types.ConnectionStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, conn) suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) @@ -409,7 +414,7 @@ func (suite *SoloMachineTestSuite) TestVerifyChannelState() { path, err := commitmenttypes.ApplyPrefix(prefix, host.ChannelPath(testPortID, testChannelID)) suite.Require().NoError(err) - value, err := types.ChannelStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, path, ch) + value, err := types.ChannelStateSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, ch) suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) @@ -490,7 +495,8 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketCommitment() { path, err := commitmenttypes.ApplyPrefix(prefix, host.PacketCommitmentPath(testPortID, testChannelID, suite.solomachine.Sequence)) suite.Require().NoError(err) - value := types.PacketCommitmentSignBytes(suite.solomachine.Sequence, suite.solomachine.Time, path, commitmentBytes) + value, err := types.PacketCommitmentSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, commitmentBytes) + suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) suite.Require().NoError(err) @@ -570,7 +576,8 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgement() { path, err := commitmenttypes.ApplyPrefix(prefix, host.PacketAcknowledgementPath(testPortID, testChannelID, suite.solomachine.Sequence)) suite.Require().NoError(err) - value := types.PacketAcknowledgementSignBytes(suite.solomachine.Sequence, suite.solomachine.Time, path, ack) + value, err := types.PacketAcknowledgementSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, ack) + suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) suite.Require().NoError(err) @@ -649,7 +656,8 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgementAbsence() { path, err := commitmenttypes.ApplyPrefix(prefix, host.PacketAcknowledgementPath(testPortID, testChannelID, suite.solomachine.Sequence)) suite.Require().NoError(err) - value := types.PacketAcknowledgementAbsenceSignBytes(suite.solomachine.Sequence, suite.solomachine.Time, path) + value, err := types.PacketAcknowledgementAbsenceSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path) + suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) suite.Require().NoError(err) @@ -729,7 +737,8 @@ func (suite *SoloMachineTestSuite) 
TestVerifyNextSeqRecv() { path, err := commitmenttypes.ApplyPrefix(prefix, host.NextSequenceRecvPath(testPortID, testChannelID)) suite.Require().NoError(err) - value := types.NextSequenceRecvSignBytes(suite.solomachine.Sequence, suite.solomachine.Time, path, nextSeqRecv) + value, err := types.NextSequenceRecvSignBytes(suite.chainA.Codec, suite.solomachine.Sequence, suite.solomachine.Time, suite.solomachine.Diversifier, path, nextSeqRecv) + suite.Require().NoError(err) sig, err := suite.solomachine.PrivateKey.Sign(value) suite.Require().NoError(err) diff --git a/x/ibc/light-clients/solomachine/types/consensus_state.go b/x/ibc/light-clients/solomachine/types/consensus_state.go index 37f43a986..583b53c15 100644 --- a/x/ibc/light-clients/solomachine/types/consensus_state.go +++ b/x/ibc/light-clients/solomachine/types/consensus_state.go @@ -1,6 +1,8 @@ package types import ( + "strings" + tmcrypto "github.com/tendermint/tendermint/crypto" "github.com/cosmos/cosmos-sdk/std" @@ -51,6 +53,9 @@ func (cs ConsensusState) ValidateBasic() error { if cs.Timestamp == 0 { return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "timestamp cannot be 0") } + if cs.Diversifier != "" && strings.TrimSpace(cs.Diversifier) == "" { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "diversifier cannot contain only spaces") + } if cs.PublicKey == nil || cs.GetPubKey() == nil || len(cs.GetPubKey().Bytes()) == 0 { return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "public key cannot be empty") } diff --git a/x/ibc/light-clients/solomachine/types/consensus_state_test.go b/x/ibc/light-clients/solomachine/types/consensus_state_test.go index 4d0a86421..f4443a982 100644 --- a/x/ibc/light-clients/solomachine/types/consensus_state_test.go +++ b/x/ibc/light-clients/solomachine/types/consensus_state_test.go @@ -29,16 +29,40 @@ func (suite *SoloMachineTestSuite) TestConsensusStateValidateBasic() { { "sequence is zero", &types.ConsensusState{ - Sequence: 0, - PublicKey: suite.solomachine.ConsensusState().PublicKey, + Sequence: 0, + PublicKey: suite.solomachine.ConsensusState().PublicKey, + Timestamp: suite.solomachine.Time, + Diversifier: suite.solomachine.Diversifier, + }, + false, + }, + { + "timestamp is zero", + &types.ConsensusState{ + Sequence: suite.solomachine.Sequence, + PublicKey: suite.solomachine.ConsensusState().PublicKey, + Timestamp: 0, + Diversifier: suite.solomachine.Diversifier, + }, + false, + }, + { + "diversifier is blank", + &types.ConsensusState{ + Sequence: suite.solomachine.Sequence, + PublicKey: suite.solomachine.ConsensusState().PublicKey, + Timestamp: suite.solomachine.Time, + Diversifier: " ", }, false, }, { "pubkey is nil", &types.ConsensusState{ - Sequence: suite.solomachine.Sequence, - PublicKey: nil, + Sequence: suite.solomachine.Sequence, + Timestamp: suite.solomachine.Time, + Diversifier: suite.solomachine.Diversifier, + PublicKey: nil, }, false, }, diff --git a/x/ibc/light-clients/solomachine/types/header.go b/x/ibc/light-clients/solomachine/types/header.go index e73560439..12af50f22 100644 --- a/x/ibc/light-clients/solomachine/types/header.go +++ b/x/ibc/light-clients/solomachine/types/header.go @@ -1,6 +1,8 @@ package types import ( + "strings" + tmcrypto "github.com/tendermint/tendermint/crypto" "github.com/cosmos/cosmos-sdk/std" @@ -40,6 +42,14 @@ func (h Header) ValidateBasic() error { return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "sequence number cannot be zero") } + if h.Timestamp == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "timestamp cannot be zero") + 
} + + if h.NewDiversifier != "" && strings.TrimSpace(h.NewDiversifier) == "" { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "diversifier cannot contain only spaces") + } + if len(h.Signature) == 0 { return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "signature cannot be empty") } diff --git a/x/ibc/light-clients/solomachine/types/header_test.go b/x/ibc/light-clients/solomachine/types/header_test.go index c2088ea27..82688add7 100644 --- a/x/ibc/light-clients/solomachine/types/header_test.go +++ b/x/ibc/light-clients/solomachine/types/header_test.go @@ -21,27 +21,55 @@ func (suite *SoloMachineTestSuite) TestHeaderValidateBasic() { { "sequence is zero", &types.Header{ - Sequence: 0, - Signature: header.Signature, - NewPublicKey: header.NewPublicKey, + Sequence: 0, + Timestamp: header.Timestamp, + Signature: header.Signature, + NewPublicKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, + }, + false, + }, + { + "timestamp is zero", + &types.Header{ + Sequence: header.Sequence, + Timestamp: 0, + Signature: header.Signature, + NewPublicKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, }, false, }, { "signature is empty", &types.Header{ - Sequence: header.Sequence, - Signature: []byte{}, - NewPublicKey: header.NewPublicKey, + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Signature: []byte{}, + NewPublicKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, + }, + false, + }, + { + "diversifier contains only spaces", + &types.Header{ + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Signature: header.Signature, + NewPublicKey: header.NewPublicKey, + NewDiversifier: " ", }, false, }, { "public key is nil", &types.Header{ - Sequence: header.Sequence, - Signature: header.Signature, - NewPublicKey: nil, + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Signature: header.Signature, + NewPublicKey: nil, + NewDiversifier: header.NewDiversifier, }, false, }, diff --git a/x/ibc/light-clients/solomachine/types/misbehaviour_handle.go b/x/ibc/light-clients/solomachine/types/misbehaviour_handle.go index 3e4806245..b34af7ff2 100644 --- a/x/ibc/light-clients/solomachine/types/misbehaviour_handle.go +++ b/x/ibc/light-clients/solomachine/types/misbehaviour_handle.go @@ -30,7 +30,7 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState( return nil, sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "client is already frozen") } - if err := checkMisbehaviour(cs, soloMisbehaviour); err != nil { + if err := checkMisbehaviour(cdc, cs, soloMisbehaviour); err != nil { return nil, err } @@ -40,19 +40,36 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState( // checkMisbehaviour checks if the currently registered public key has signed // over two different messages at the same sequence. +// // NOTE: a check that the misbehaviour message data are not equal is done by // misbehaviour.ValidateBasic which is called by the 02-client keeper. 
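The ValidateBasic hunks above treat the diversifier as optional but reject whitespace-only values; the misbehaviour handler changes continue below. A one-line restatement of that rule as a sketch (validDiversifier and the package name are hypothetical, not part of the change):

package solomachineexample // illustrative package, not part of the SDK

import "strings"

// validDiversifier mirrors the new ValidateBasic checks: an empty diversifier
// is acceptable, but a non-empty one must contain more than spaces.
func validDiversifier(diversifier string) bool {
	return diversifier == "" || strings.TrimSpace(diversifier) != ""
}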
-func checkMisbehaviour(clientState ClientState, soloMisbehaviour *Misbehaviour) error { +func checkMisbehaviour(cdc codec.BinaryMarshaler, clientState ClientState, soloMisbehaviour *Misbehaviour) error { pubKey := clientState.ConsensusState.GetPubKey() - data := EvidenceSignBytes(soloMisbehaviour.Sequence, soloMisbehaviour.SignatureOne.Data) + data, err := MisbehaviourSignBytes( + cdc, + soloMisbehaviour.Sequence, clientState.ConsensusState.Timestamp, + clientState.ConsensusState.Diversifier, + soloMisbehaviour.SignatureOne.Data, + ) + if err != nil { + return err + } // check first signature if err := VerifySignature(pubKey, data, soloMisbehaviour.SignatureOne.Signature); err != nil { return sdkerrors.Wrap(err, "misbehaviour signature one failed to be verified") } - data = EvidenceSignBytes(soloMisbehaviour.Sequence, soloMisbehaviour.SignatureTwo.Data) + data, err = MisbehaviourSignBytes( + cdc, + soloMisbehaviour.Sequence, clientState.ConsensusState.Timestamp, + clientState.ConsensusState.Diversifier, + soloMisbehaviour.SignatureTwo.Data, + ) + if err != nil { + return err + } // check second signature if err := VerifySignature(pubKey, data, soloMisbehaviour.SignatureTwo.Signature); err != nil { diff --git a/x/ibc/light-clients/solomachine/types/misbehaviour_handle_test.go b/x/ibc/light-clients/solomachine/types/misbehaviour_handle_test.go index aec4677ba..80bd6d115 100644 --- a/x/ibc/light-clients/solomachine/types/misbehaviour_handle_test.go +++ b/x/ibc/light-clients/solomachine/types/misbehaviour_handle_test.go @@ -1,9 +1,9 @@ package types_test import ( - sdk "github.com/cosmos/cosmos-sdk/types" ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/07-tendermint/types" "github.com/cosmos/cosmos-sdk/x/ibc/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types" ) func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() { @@ -60,7 +60,14 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() { m := suite.solomachine.CreateMisbehaviour() msg := []byte("DATA ONE") - data := append(sdk.Uint64ToBigEndian(suite.solomachine.Sequence+1), msg...) + signBytes := &types.SignBytes{ + Sequence: suite.solomachine.Sequence + 1, + Data: msg, + } + + data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + sig, err := suite.solomachine.PrivateKey.Sign(data) suite.Require().NoError(err) @@ -79,7 +86,14 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() { m := suite.solomachine.CreateMisbehaviour() msg := []byte("DATA TWO") - data := append(sdk.Uint64ToBigEndian(suite.solomachine.Sequence+1), msg...) + signBytes := &types.SignBytes{ + Sequence: suite.solomachine.Sequence + 1, + Data: msg, + } + + data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + sig, err := suite.solomachine.PrivateKey.Sign(data) suite.Require().NoError(err) @@ -100,7 +114,14 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() { // Signature One msg := []byte("DATA ONE") // sequence used is plus 1 - data := append(sdk.Uint64ToBigEndian(suite.solomachine.Sequence+1), msg...) 
+ signBytes := &types.SignBytes{ + Sequence: suite.solomachine.Sequence + 1, + Data: msg, + } + + data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + sig, err := suite.solomachine.PrivateKey.Sign(data) suite.Require().NoError(err) @@ -110,7 +131,14 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() { // Signature Two msg = []byte("DATA TWO") // sequence used is minus 1 - data = append(sdk.Uint64ToBigEndian(suite.solomachine.Sequence-1), msg...) + + signBytes = &types.SignBytes{ + Sequence: suite.solomachine.Sequence - 1, + Data: msg, + } + data, err = suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + sig, err = suite.solomachine.PrivateKey.Sign(data) suite.Require().NoError(err) diff --git a/x/ibc/light-clients/solomachine/types/proof.go b/x/ibc/light-clients/solomachine/types/proof.go index 7beaec016..d13dfc0a8 100644 --- a/x/ibc/light-clients/solomachine/types/proof.go +++ b/x/ibc/light-clients/solomachine/types/proof.go @@ -4,7 +4,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/02-client/types" connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/03-connection/types" @@ -23,193 +22,298 @@ func VerifySignature(pubKey crypto.PubKey, data, signature []byte) error { return nil } -// EvidenceSignBytes returns the sign bytes for verification of misbehaviour. -// -// Format: {sequence}{data} -func EvidenceSignBytes(sequence uint64, data []byte) []byte { - return append( - sdk.Uint64ToBigEndian(sequence), - data..., - ) +// MisbehaviourSignBytes returns the sign bytes for verification of misbehaviour. +func MisbehaviourSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + data []byte) ([]byte, error) { + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: data, + } + + return cdc.MarshalBinaryBare(signBytes) } // HeaderSignBytes returns the sign bytes for verification of misbehaviour. -// -// Format: {sequence}{header.newPubKey} -func HeaderSignBytes(header *Header) []byte { - return append( - sdk.Uint64ToBigEndian(header.Sequence), - header.GetPubKey().Bytes()..., - ) +func HeaderSignBytes( + cdc codec.BinaryMarshaler, + header *Header, +) ([]byte, error) { + data := &HeaderData{ + NewPubKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Diversifier: header.NewDiversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // ClientStateSignBytes returns the sign bytes for verification of the // client state. 
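The helpers above now proto-encode a SignBytes envelope instead of concatenating {sequence}{data}; ClientStateSignBytes continues below. A minimal sketch of the verification side, assuming only the exported MisbehaviourSignBytes and VerifySignature shown in this file (verifyOneSignature and the package name are illustrative):

package solomachineexample // illustrative package, not part of the SDK

import (
	"github.com/tendermint/tendermint/crypto"

	"github.com/cosmos/cosmos-sdk/codec"
	"github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"
)

// verifyOneSignature reconstructs the proto-encoded sign bytes for one half of
// a misbehaviour and checks it against the registered public key, mirroring
// what checkMisbehaviour does for SignatureOne and SignatureTwo.
func verifyOneSignature(
	cdc codec.BinaryMarshaler, pubKey crypto.PubKey,
	sequence, timestamp uint64, diversifier string,
	sigData *types.SignatureAndData,
) error {
	data, err := types.MisbehaviourSignBytes(cdc, sequence, timestamp, diversifier, sigData.Data)
	if err != nil {
		return err
	}

	return types.VerifySignature(pubKey, data, sigData.Signature)
}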
-// -// Format: {sequence}{timestamp}{path}{client-state} func ClientStateSignBytes( cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer clientState exported.ClientState, ) ([]byte, error) { - bz, err := codec.MarshalAny(cdc, clientState) + any, err := clienttypes.PackClientState(clientState) if err != nil { return nil, err } - // sequence + timestamp + path + client state - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - bz..., - ), nil + data := &ClientStateData{ + Path: []byte(path.String()), + ClientState: any, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // ConsensusStateSignBytes returns the sign bytes for verification of the // consensus state. -// -// Format: {sequence}{timestamp}{path}{consensus-state} func ConsensusStateSignBytes( cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer consensusState exported.ConsensusState, ) ([]byte, error) { - bz, err := codec.MarshalAny(cdc, consensusState) + any, err := clienttypes.PackConsensusState(consensusState) if err != nil { return nil, err } - // sequence + timestamp + path + consensus state - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - bz..., - ), nil + data := &ConsensusStateData{ + Path: []byte(path.String()), + ConsensusState: any, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // ConnectionStateSignBytes returns the sign bytes for verification of the // connection state. -// -// Format: {sequence}{timestamp}{path}{connection-end} func ConnectionStateSignBytes( cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer connectionEnd exported.ConnectionI, ) ([]byte, error) { connection, ok := connectionEnd.(connectiontypes.ConnectionEnd) if !ok { - return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "invalid connection type %T", connectionEnd) + return nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnection, + "expected type %T, got %T", connectiontypes.ConnectionEnd{}, connectionEnd, + ) } - bz, err := cdc.MarshalBinaryBare(&connection) + data := &ConnectionStateData{ + Path: []byte(path.String()), + Connection: &connection, + } + + dataBz, err := cdc.MarshalBinaryBare(data) if err != nil { return nil, err } - // sequence + timestamp + path + connection end - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - bz..., - ), nil + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // ChannelStateSignBytes returns the sign bytes for verification of the // channel state. 
-// -// Format: {sequence}{timestamp}{path}{channel-end} func ChannelStateSignBytes( cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer channelEnd exported.ChannelI, ) ([]byte, error) { channel, ok := channelEnd.(channeltypes.Channel) if !ok { - return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "invalid channel type %T", channelEnd) + return nil, sdkerrors.Wrapf( + channeltypes.ErrInvalidChannel, + "expected channel type %T, got %T", channeltypes.Channel{}, channelEnd) } - bz, err := cdc.MarshalBinaryBare(&channel) + data := &ChannelStateData{ + Path: []byte(path.String()), + Channel: &channel, + } + + dataBz, err := cdc.MarshalBinaryBare(data) if err != nil { return nil, err } - // sequence + timestamp + path + channel - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - bz..., - ), nil + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // PacketCommitmentSignBytes returns the sign bytes for verification of the // packet commitment. -// -// Format: {sequence}{timestamp}{path}{commitment-bytes} func PacketCommitmentSignBytes( + cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer commitmentBytes []byte, -) []byte { +) ([]byte, error) { + data := &PacketCommitmentData{ + Path: []byte(path.String()), + Commitment: commitmentBytes, + } - // sequence + timestamp + path + commitment bytes - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - commitmentBytes..., - ) + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // PacketAcknowledgementSignBytes returns the sign bytes for verification of // the acknowledgement. -// -// Format: {sequence}{timestamp}{path}{acknowledgement} func PacketAcknowledgementSignBytes( + cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer acknowledgement []byte, -) []byte { +) ([]byte, error) { + data := &PacketAcknowledgementData{ + Path: []byte(path.String()), + Acknowledgement: acknowledgement, + } - // sequence + timestamp + path + acknowledgement - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - acknowledgement..., - ) + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } // PacketAcknowledgementAbsenceSignBytes returns the sign bytes for verification // of the absence of an acknowledgement. 
-// -// Format: {sequence}{timestamp}{path} func PacketAcknowledgementAbsenceSignBytes( + cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, -) []byte { - // value = sequence + timestamp + path - return combineSequenceTimestampPath(sequence, timestamp, path) + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer +) ([]byte, error) { + data := &PacketAcknowledgementAbsenseData{ + Path: []byte(path.String()), + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } -// NextSequenceRecv returns the sign bytes for verification of the next +// NextSequenceRecvSignBytes returns the sign bytes for verification of the next // sequence to be received. -// -// Format: {sequence}{timestamp}{path}{next-sequence-recv} func NextSequenceRecvSignBytes( + cdc codec.BinaryMarshaler, sequence, timestamp uint64, - path commitmenttypes.MerklePath, + diversifier string, + path commitmenttypes.MerklePath, // nolint: interfacer nextSequenceRecv uint64, -) []byte { +) ([]byte, error) { + data := &NextSequenceRecvData{ + Path: []byte(path.String()), + NextSeqRecv: nextSequenceRecv, + } - // sequence + timestamp + path + nextSequenceRecv - return append( - combineSequenceTimestampPath(sequence, timestamp, path), - sdk.Uint64ToBigEndian(nextSequenceRecv)..., - ) -} + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } -// combineSequenceTimestampPath combines the sequence, the timestamp and -// the path into one byte slice. -func combineSequenceTimestampPath(sequence, timestamp uint64, path commitmenttypes.MerklePath) []byte { - bz := append(sdk.Uint64ToBigEndian(sequence), sdk.Uint64ToBigEndian(timestamp)...) - return append( - bz, - []byte(path.String())..., - ) + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) } diff --git a/x/ibc/light-clients/solomachine/types/solomachine.pb.go b/x/ibc/light-clients/solomachine/types/solomachine.pb.go index 907689ba7..46dcfd908 100644 --- a/x/ibc/light-clients/solomachine/types/solomachine.pb.go +++ b/x/ibc/light-clients/solomachine/types/solomachine.pb.go @@ -5,7 +5,10 @@ package types import ( fmt "fmt" + types1 "github.com/cosmos/cosmos-sdk/codec/types" types "github.com/cosmos/cosmos-sdk/crypto/types" + types2 "github.com/cosmos/cosmos-sdk/x/ibc/03-connection/types" + types3 "github.com/cosmos/cosmos-sdk/x/ibc/04-channel/types" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" io "io" @@ -74,7 +77,10 @@ type ConsensusState struct { Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` // public key of the solo machine PublicKey *types.PublicKey `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" yaml:"public_key"` - Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // diversifier allows the same public key to be re-used across different solo machine clients + // (potentially on different chains) without being considered misbehaviour. 
+ Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *ConsensusState) Reset() { *m = ConsensusState{} } @@ -113,9 +119,11 @@ var xxx_messageInfo_ConsensusState proto.InternalMessageInfo // Header defines a solo machine consensus header type Header struct { // sequence to update solo machine public key at - Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` - Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` - NewPublicKey *types.PublicKey `protobuf:"bytes,3,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"` + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + NewPublicKey *types.PublicKey `protobuf:"bytes,4,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"` + NewDiversifier string `protobuf:"bytes,5,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` } func (m *Header) Reset() { *m = Header{} } @@ -272,6 +280,451 @@ func (m *TimestampedSignature) XXX_DiscardUnknown() { var xxx_messageInfo_TimestampedSignature proto.InternalMessageInfo +// SignBytes defines the signed bytes used for signature verification. +type SignBytes struct { + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + // marshaled data + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *SignBytes) Reset() { *m = SignBytes{} } +func (m *SignBytes) String() string { return proto.CompactTextString(m) } +func (*SignBytes) ProtoMessage() {} +func (*SignBytes) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{6} +} +func (m *SignBytes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignBytes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignBytes.Merge(m, src) +} +func (m *SignBytes) XXX_Size() int { + return m.Size() +} +func (m *SignBytes) XXX_DiscardUnknown() { + xxx_messageInfo_SignBytes.DiscardUnknown(m) +} + +var xxx_messageInfo_SignBytes proto.InternalMessageInfo + +// HeaderData returns the SignBytes data for misbehaviour verification. 
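The SignBytes message defined above is the envelope a solo machine actually signs; the HeaderData message follows below. A minimal signer-side sketch of the same flow the updated misbehaviour tests use via suite.chainA.Codec and suite.solomachine.PrivateKey (signData and the package name are illustrative):

package solomachineexample // illustrative package, not part of the SDK

import (
	"github.com/tendermint/tendermint/crypto"

	"github.com/cosmos/cosmos-sdk/codec"
	"github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"
)

// signData marshals the SignBytes envelope and signs it with the solo
// machine's private key.
func signData(
	cdc codec.BinaryMarshaler, privKey crypto.PrivKey,
	sequence, timestamp uint64, diversifier string, data []byte,
) ([]byte, error) {
	signBytes := &types.SignBytes{
		Sequence:    sequence,
		Timestamp:   timestamp,
		Diversifier: diversifier,
		Data:        data, // already-marshaled HeaderData, ClientStateData, etc.
	}

	bz, err := cdc.MarshalBinaryBare(signBytes)
	if err != nil {
		return nil, err
	}

	return privKey.Sign(bz)
}

Carrying the diversifier and timestamp inside the signed envelope is what lets the same public key be reused across different solo machine clients without the resulting signatures colliding as misbehaviour, as the diversifier field comment above notes.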
+type HeaderData struct { + // header public key + NewPubKey *types.PublicKey `protobuf:"bytes,1,opt,name=new_pub_key,json=newPubKey,proto3" json:"new_pub_key,omitempty" yaml:"new_pub_key"` + // header diversifier + NewDiversifier string `protobuf:"bytes,2,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *HeaderData) Reset() { *m = HeaderData{} } +func (m *HeaderData) String() string { return proto.CompactTextString(m) } +func (*HeaderData) ProtoMessage() {} +func (*HeaderData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{7} +} +func (m *HeaderData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeaderData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeaderData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeaderData) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderData.Merge(m, src) +} +func (m *HeaderData) XXX_Size() int { + return m.Size() +} +func (m *HeaderData) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderData.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderData proto.InternalMessageInfo + +// ClientStateData returns the SignBytes data for client state verification. +type ClientStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ClientState *types1.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` +} + +func (m *ClientStateData) Reset() { *m = ClientStateData{} } +func (m *ClientStateData) String() string { return proto.CompactTextString(m) } +func (*ClientStateData) ProtoMessage() {} +func (*ClientStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{8} +} +func (m *ClientStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStateData.Merge(m, src) +} +func (m *ClientStateData) XXX_Size() int { + return m.Size() +} +func (m *ClientStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStateData proto.InternalMessageInfo + +// ConsensusStateSignBytes returns the SignBytes data for consensus state verification. 
+type ConsensusStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ConsensusState *types1.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` +} + +func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} } +func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) } +func (*ConsensusStateData) ProtoMessage() {} +func (*ConsensusStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{9} +} +func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusStateData.Merge(m, src) +} +func (m *ConsensusStateData) XXX_Size() int { + return m.Size() +} +func (m *ConsensusStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusStateData proto.InternalMessageInfo + +// ConnectionStateSignBytes returns the SignBytes data for connection state verification. +type ConnectionStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Connection *types2.ConnectionEnd `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} } +func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) } +func (*ConnectionStateData) ProtoMessage() {} +func (*ConnectionStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{10} +} +func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionStateData.Merge(m, src) +} +func (m *ConnectionStateData) XXX_Size() int { + return m.Size() +} +func (m *ConnectionStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionStateData proto.InternalMessageInfo + +// ChannelStateSignBytes returns the SignBytes data for channel state verification. 
+type ChannelStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Channel *types3.Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"` +} + +func (m *ChannelStateData) Reset() { *m = ChannelStateData{} } +func (m *ChannelStateData) String() string { return proto.CompactTextString(m) } +func (*ChannelStateData) ProtoMessage() {} +func (*ChannelStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{11} +} +func (m *ChannelStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChannelStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChannelStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChannelStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChannelStateData.Merge(m, src) +} +func (m *ChannelStateData) XXX_Size() int { + return m.Size() +} +func (m *ChannelStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ChannelStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ChannelStateData proto.InternalMessageInfo + +// PacketCommitmentSignBytes returns the SignBytes data for packet commitment verification. +type PacketCommitmentData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Commitment []byte `protobuf:"bytes,2,opt,name=commitment,proto3" json:"commitment,omitempty"` +} + +func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} } +func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) } +func (*PacketCommitmentData) ProtoMessage() {} +func (*PacketCommitmentData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{12} +} +func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketCommitmentData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketCommitmentData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketCommitmentData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketCommitmentData.Merge(m, src) +} +func (m *PacketCommitmentData) XXX_Size() int { + return m.Size() +} +func (m *PacketCommitmentData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketCommitmentData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketCommitmentData proto.InternalMessageInfo + +func (m *PacketCommitmentData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketCommitmentData) GetCommitment() []byte { + if m != nil { + return m.Commitment + } + return nil +} + +// PacketAcknowledgementSignBytes returns the SignBytes data for acknowledgement verification. 
+type PacketAcknowledgementData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"` +} + +func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgementData{} } +func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) } +func (*PacketAcknowledgementData) ProtoMessage() {} +func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{13} +} +func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketAcknowledgementData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketAcknowledgementData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketAcknowledgementData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketAcknowledgementData.Merge(m, src) +} +func (m *PacketAcknowledgementData) XXX_Size() int { + return m.Size() +} +func (m *PacketAcknowledgementData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketAcknowledgementData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketAcknowledgementData proto.InternalMessageInfo + +func (m *PacketAcknowledgementData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketAcknowledgementData) GetAcknowledgement() []byte { + if m != nil { + return m.Acknowledgement + } + return nil +} + +// PacketAcknowledgementAbsenceSignBytes returns the SignBytes data for acknowledgement absence +// verification. +type PacketAcknowledgementAbsenseData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (m *PacketAcknowledgementAbsenseData) Reset() { *m = PacketAcknowledgementAbsenseData{} } +func (m *PacketAcknowledgementAbsenseData) String() string { return proto.CompactTextString(m) } +func (*PacketAcknowledgementAbsenseData) ProtoMessage() {} +func (*PacketAcknowledgementAbsenseData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{14} +} +func (m *PacketAcknowledgementAbsenseData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketAcknowledgementAbsenseData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketAcknowledgementAbsenseData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketAcknowledgementAbsenseData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketAcknowledgementAbsenseData.Merge(m, src) +} +func (m *PacketAcknowledgementAbsenseData) XXX_Size() int { + return m.Size() +} +func (m *PacketAcknowledgementAbsenseData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketAcknowledgementAbsenseData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketAcknowledgementAbsenseData proto.InternalMessageInfo + +func (m *PacketAcknowledgementAbsenseData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +// NextSequenceRecv returns the SignBytes data for verification of the next +// sequence to be received. 
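Each *Data message above carries the proof path plus the value being attested and is itself marshaled into SignBytes.Data; NextSequenceRecvData below completes the set. A compact sketch of that two-step encoding for a packet commitment, equivalent in spirit to PacketCommitmentSignBytes in proof.go (pathStr stands for MerklePath.String(); names are illustrative):

package solomachineexample // illustrative package, not part of the SDK

import (
	"github.com/cosmos/cosmos-sdk/codec"
	"github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"
)

// packetCommitmentSignBytes shows the shared pattern: wrap the proof path and
// value in the field-specific Data message, then embed that in the SignBytes
// envelope that gets signed.
func packetCommitmentSignBytes(
	cdc codec.BinaryMarshaler,
	sequence, timestamp uint64, diversifier, pathStr string,
	commitment []byte,
) ([]byte, error) {
	data := &types.PacketCommitmentData{
		Path:       []byte(pathStr),
		Commitment: commitment,
	}

	dataBz, err := cdc.MarshalBinaryBare(data)
	if err != nil {
		return nil, err
	}

	return cdc.MarshalBinaryBare(&types.SignBytes{
		Sequence:    sequence,
		Timestamp:   timestamp,
		Diversifier: diversifier,
		Data:        dataBz,
	})
}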
+type NextSequenceRecvData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + NextSeqRecv uint64 `protobuf:"varint,2,opt,name=next_seq_recv,json=nextSeqRecv,proto3" json:"next_seq_recv,omitempty" yaml:"next_seq_recv"` +} + +func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} } +func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) } +func (*NextSequenceRecvData) ProtoMessage() {} +func (*NextSequenceRecvData) Descriptor() ([]byte, []int) { + return fileDescriptor_6cc2ee18f7f86d4e, []int{15} +} +func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NextSequenceRecvData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NextSequenceRecvData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NextSequenceRecvData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextSequenceRecvData.Merge(m, src) +} +func (m *NextSequenceRecvData) XXX_Size() int { + return m.Size() +} +func (m *NextSequenceRecvData) XXX_DiscardUnknown() { + xxx_messageInfo_NextSequenceRecvData.DiscardUnknown(m) +} + +var xxx_messageInfo_NextSequenceRecvData proto.InternalMessageInfo + +func (m *NextSequenceRecvData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 { + if m != nil { + return m.NextSeqRecv + } + return 0 +} + func init() { proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v1.ClientState") proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v1.ConsensusState") @@ -279,6 +732,16 @@ func init() { proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v1.Misbehaviour") proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v1.SignatureAndData") proto.RegisterType((*TimestampedSignature)(nil), "ibc.lightclients.solomachine.v1.TimestampedSignature") + proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v1.SignBytes") + proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v1.HeaderData") + proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v1.ClientStateData") + proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v1.ConsensusStateData") + proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v1.ConnectionStateData") + proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v1.ChannelStateData") + proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v1.PacketCommitmentData") + proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementData") + proto.RegisterType((*PacketAcknowledgementAbsenseData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementAbsenseData") + proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v1.NextSequenceRecvData") } func init() { @@ -286,48 +749,71 @@ func init() { } var fileDescriptor_6cc2ee18f7f86d4e = []byte{ - // 649 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x6e, 0xd3, 0x4e, - 0x10, 0x8e, 0xd3, 0xa8, 0x4a, 0xb6, 0xf9, 0xa5, 0xfd, 0x59, 0x29, 0x0a, 0xa1, 0x8a, 0x2b, 0x4b, - 0x40, 0x2f, 0xb5, 0x65, 0xb8, 0xf5, 0x56, 0x97, 0x03, 
0x7f, 0x84, 0xa8, 0xdc, 0x22, 0x21, 0x40, - 0xb2, 0xd6, 0xf6, 0x34, 0x59, 0xd5, 0xd9, 0x35, 0xde, 0x75, 0x42, 0x78, 0x02, 0x8e, 0x1c, 0x39, - 0xf2, 0x02, 0x48, 0x5c, 0x78, 0x07, 0x24, 0x2e, 0x3d, 0x72, 0x8a, 0x50, 0xfb, 0x06, 0x79, 0x02, - 0x14, 0xef, 0x26, 0x8d, 0xa3, 0xaa, 0x15, 0x12, 0x27, 0xef, 0xce, 0x8c, 0xbf, 0xfd, 0xbe, 0xf9, - 0x46, 0x83, 0x1c, 0x12, 0x84, 0x76, 0x4c, 0xba, 0x3d, 0x11, 0xc6, 0x04, 0xa8, 0xe0, 0x36, 0x67, - 0x31, 0xeb, 0xe3, 0xb0, 0x47, 0x28, 0xd8, 0x03, 0x67, 0xf1, 0x6a, 0x25, 0x29, 0x13, 0x4c, 0x37, - 0x48, 0x10, 0x5a, 0x8b, 0xbf, 0x58, 0x8b, 0x35, 0x03, 0xa7, 0x7d, 0x3f, 0x64, 0xbc, 0xcf, 0xb8, - 0x1d, 0x60, 0x0e, 0x76, 0x98, 0x8e, 0x12, 0xc1, 0xec, 0x81, 0x13, 0x80, 0xc0, 0x8e, 0xba, 0x4a, - 0xa4, 0x76, 0xb3, 0xcb, 0xba, 0x2c, 0x3f, 0xda, 0xd3, 0x93, 0x8c, 0x9a, 0xdf, 0xcb, 0x68, 0xed, - 0x20, 0x47, 0x3e, 0x12, 0x58, 0x80, 0x7e, 0x80, 0xd6, 0x4f, 0x52, 0xf6, 0x01, 0xa8, 0xcf, 0xe1, - 0x5d, 0x06, 0x34, 0x84, 0x96, 0xb6, 0xad, 0xed, 0x54, 0xdc, 0xf6, 0x64, 0x6c, 0xdc, 0x1a, 0xe1, - 0x7e, 0xbc, 0x67, 0x2e, 0x15, 0x98, 0x5e, 0x43, 0x46, 0x8e, 0x54, 0x40, 0x17, 0x68, 0x3d, 0x64, - 0x94, 0x03, 0xe5, 0x19, 0xf7, 0xf9, 0x14, 0xb7, 0x55, 0xde, 0xd6, 0x76, 0xd6, 0x1e, 0xd8, 0xd6, - 0x0d, 0x72, 0xac, 0x83, 0xd9, 0x7f, 0x39, 0x9d, 0xc5, 0x57, 0x97, 0x10, 0x4d, 0xaf, 0x11, 0x16, - 0x6a, 0x75, 0x40, 0x77, 0x70, 0x1c, 0xb3, 0xa1, 0x9f, 0x25, 0x11, 0x16, 0xe0, 0xe3, 0x13, 0x01, - 0xa9, 0x9f, 0xa4, 0x2c, 0x61, 0x1c, 0xc7, 0xad, 0x95, 0x6d, 0x6d, 0xa7, 0xea, 0xde, 0x9b, 0x8c, - 0x0d, 0x53, 0x02, 0x5e, 0x53, 0x6c, 0x7a, 0xad, 0x3c, 0xfb, 0x32, 0x4f, 0xee, 0x4f, 0x73, 0x87, - 0x2a, 0xb5, 0x57, 0xf9, 0xf8, 0xc5, 0x28, 0x99, 0x5f, 0x35, 0xd4, 0x28, 0x72, 0xd5, 0xdb, 0xa8, - 0x5a, 0xec, 0x99, 0x37, 0xbf, 0xeb, 0x6f, 0x10, 0x4a, 0xb2, 0x20, 0x26, 0xa1, 0x7f, 0x0a, 0x23, - 0xd5, 0x8c, 0xbb, 0x96, 0xb4, 0xce, 0x9a, 0x5a, 0x67, 0x29, 0xaf, 0x94, 0x75, 0xd6, 0x61, 0x5e, - 0xfd, 0x0c, 0x46, 0xee, 0xe6, 0x64, 0x6c, 0xfc, 0x2f, 0x19, 0x5f, 0x42, 0x98, 0x5e, 0x2d, 0x99, - 0x55, 0xe8, 0x5b, 0xa8, 0x26, 0x48, 0x1f, 0xb8, 0xc0, 0xfd, 0x24, 0x97, 0x59, 0xf1, 0x2e, 0x03, - 0x8a, 0xef, 0x37, 0x0d, 0xad, 0x3e, 0x06, 0x1c, 0x41, 0x7a, 0x2d, 0xcf, 0x2d, 0x54, 0xe3, 0xa4, - 0x4b, 0xb1, 0xc8, 0x52, 0xe9, 0x59, 0xdd, 0xbb, 0x0c, 0xe8, 0x27, 0xa8, 0x41, 0x61, 0xe8, 0x2f, - 0x28, 0x59, 0xf9, 0x1b, 0x25, 0xb7, 0x27, 0x63, 0x63, 0x53, 0x2a, 0x29, 0xc2, 0x98, 0x5e, 0x9d, - 0xc2, 0x70, 0x5e, 0xa8, 0x28, 0xff, 0x2c, 0xa3, 0xfa, 0x73, 0xc2, 0x03, 0xe8, 0xe1, 0x01, 0x61, - 0x59, 0xaa, 0x3b, 0xa8, 0x26, 0xa7, 0xc6, 0x27, 0x51, 0xce, 0xbc, 0xe6, 0x36, 0x27, 0x63, 0x63, - 0x43, 0xcd, 0xc7, 0x2c, 0x65, 0x7a, 0x55, 0x79, 0x7e, 0x12, 0x15, 0xb4, 0x96, 0x97, 0xb4, 0x26, - 0xe8, 0xbf, 0xb9, 0x34, 0x9f, 0x51, 0x50, 0x62, 0x9c, 0x1b, 0x67, 0xf4, 0x68, 0xf6, 0xd7, 0x3e, - 0x8d, 0x1e, 0x61, 0x81, 0xdd, 0xd6, 0x64, 0x6c, 0x34, 0x25, 0x8b, 0x02, 0xa2, 0xe9, 0xd5, 0xe7, - 0xf7, 0x17, 0x74, 0xe9, 0x45, 0x31, 0x64, 0xad, 0xca, 0x3f, 0x7d, 0x51, 0x0c, 0xd9, 0xe2, 0x8b, - 0xc7, 0x43, 0xb6, 0x57, 0x9d, 0x76, 0xf2, 0xf3, 0xb4, 0x9b, 0x4f, 0xd1, 0xc6, 0x32, 0x4a, 0xd1, - 0x6d, 0x6d, 0xd9, 0x6d, 0x1d, 0x55, 0x22, 0x2c, 0xb0, 0x1a, 0x83, 0xfc, 0xac, 0x9c, 0x79, 0x85, - 0x9a, 0xc7, 0xb3, 0xf9, 0x82, 0x68, 0x0e, 0x7b, 0x03, 0x5e, 0x61, 0x4c, 0xcb, 0x57, 0x8e, 0xa9, - 0xfb, 0xf6, 0xc7, 0x79, 0x47, 0x3b, 0x3b, 0xef, 0x68, 0xbf, 0xcf, 0x3b, 0xda, 0xa7, 0x8b, 0x4e, - 0xe9, 0xec, 0xa2, 0x53, 0xfa, 0x75, 0xd1, 0x29, 0xbd, 0x76, 0xbb, 0x44, 0xf4, 0xb2, 0xc0, 0x0a, - 0x59, 0xdf, 0x56, 0x2b, 0x4f, 0x7e, 0x76, 0x79, 0x74, 0x6a, 0xbf, 0xb7, 0xe7, 
0xab, 0x75, 0xf7, - 0xaa, 0xdd, 0x2a, 0x46, 0x09, 0xf0, 0x60, 0x35, 0xdf, 0x79, 0x0f, 0xff, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x86, 0xeb, 0x8b, 0x5f, 0x88, 0x05, 0x00, 0x00, + // 1023 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x41, 0x6f, 0xe3, 0x44, + 0x14, 0xae, 0x43, 0x58, 0x9a, 0x97, 0x6c, 0x5b, 0xbc, 0xd9, 0x25, 0x2d, 0x10, 0x57, 0x96, 0x80, + 0x5e, 0xd6, 0x56, 0x16, 0x89, 0x43, 0x05, 0x87, 0x24, 0x8b, 0x04, 0x8b, 0x80, 0xca, 0x5d, 0x24, + 0x16, 0x16, 0x59, 0x63, 0xfb, 0x25, 0xb1, 0xea, 0xcc, 0x18, 0x7b, 0x92, 0x34, 0x48, 0x1c, 0xb8, + 0xc1, 0x0d, 0x89, 0x0b, 0x47, 0xfe, 0x04, 0xfc, 0x06, 0x24, 0x0e, 0xec, 0x91, 0x53, 0x84, 0xda, + 0x7f, 0x90, 0x5f, 0x80, 0x3c, 0x1e, 0x27, 0x76, 0x68, 0x52, 0x15, 0xf6, 0xe4, 0x99, 0xf7, 0xde, + 0x7c, 0xef, 0xcd, 0x37, 0xef, 0xf9, 0x3d, 0x68, 0xf9, 0x8e, 0x6b, 0x06, 0x7e, 0x7f, 0xc0, 0xdd, + 0xc0, 0x47, 0xca, 0x63, 0x33, 0x66, 0x01, 0x1b, 0x12, 0x77, 0xe0, 0x53, 0x34, 0xc7, 0xad, 0xfc, + 0xd6, 0x08, 0x23, 0xc6, 0x99, 0xaa, 0xf9, 0x8e, 0x6b, 0xe4, 0x8f, 0x18, 0x79, 0x9b, 0x71, 0xeb, + 0xe0, 0x2d, 0x97, 0xc5, 0x43, 0x16, 0x9b, 0x0e, 0x89, 0xd1, 0x74, 0xa3, 0x69, 0xc8, 0x99, 0x39, + 0x6e, 0x39, 0xc8, 0x49, 0x4b, 0x6e, 0x53, 0xa4, 0x83, 0x04, 0xc9, 0x74, 0x19, 0xa5, 0xe8, 0x72, + 0x9f, 0xd1, 0xdc, 0x52, 0x1a, 0xec, 0x0b, 0x83, 0x01, 0xa1, 0x14, 0x83, 0xec, 0x2b, 0x55, 0xf5, + 0x3e, 0xeb, 0x33, 0xb1, 0x34, 0x93, 0x55, 0x76, 0xa0, 0xcf, 0x58, 0x3f, 0x40, 0x53, 0xec, 0x9c, + 0x51, 0xcf, 0x24, 0x74, 0x9a, 0xaa, 0xf4, 0x5f, 0x4b, 0x50, 0xed, 0x8a, 0x80, 0x4f, 0x39, 0xe1, + 0xa8, 0x76, 0x61, 0xb7, 0x17, 0xb1, 0x6f, 0x90, 0xda, 0x31, 0x7e, 0x3d, 0x42, 0xea, 0x62, 0x43, + 0x39, 0x54, 0x8e, 0xca, 0x9d, 0x83, 0xf9, 0x4c, 0xbb, 0x37, 0x25, 0xc3, 0xe0, 0x58, 0x5f, 0x31, + 0xd0, 0xad, 0x9d, 0x54, 0x72, 0x2a, 0x05, 0x2a, 0x87, 0x5d, 0x97, 0xd1, 0x18, 0x69, 0x3c, 0x8a, + 0xed, 0x38, 0xc1, 0x6d, 0x94, 0x0e, 0x95, 0xa3, 0xea, 0x03, 0xd3, 0xb8, 0x86, 0x25, 0xa3, 0x9b, + 0x9d, 0x13, 0xe1, 0xe4, 0xbd, 0xae, 0x20, 0xea, 0xd6, 0x8e, 0x5b, 0xb0, 0x55, 0x11, 0x5e, 0x25, + 0x41, 0xc0, 0x26, 0xf6, 0x28, 0xf4, 0x08, 0x47, 0x9b, 0xf4, 0x38, 0x46, 0x76, 0x18, 0xb1, 0x90, + 0xc5, 0x24, 0x68, 0xbc, 0x70, 0xa8, 0x1c, 0x6d, 0x77, 0xde, 0x9c, 0xcf, 0x34, 0x3d, 0x05, 0xdc, + 0x60, 0xac, 0x5b, 0x0d, 0xa1, 0xfd, 0x4c, 0x28, 0xdb, 0x89, 0xee, 0x44, 0xaa, 0x8e, 0xcb, 0xdf, + 0xff, 0xa2, 0x6d, 0xe9, 0x7f, 0x2a, 0xb0, 0x53, 0x8c, 0x55, 0x3d, 0x80, 0xed, 0x22, 0x67, 0xd6, + 0x62, 0xaf, 0x7e, 0x09, 0x10, 0x8e, 0x9c, 0xc0, 0x77, 0xed, 0x33, 0x9c, 0x4a, 0x32, 0xde, 0x30, + 0xd2, 0x8c, 0x30, 0x92, 0x8c, 0x30, 0x64, 0x0a, 0xc8, 0x8c, 0x30, 0x4e, 0x84, 0xf5, 0x47, 0x38, + 0xed, 0xdc, 0x9d, 0xcf, 0xb4, 0x97, 0xd3, 0x88, 0x97, 0x10, 0xba, 0x55, 0x09, 0x33, 0x0b, 0xf5, + 0x10, 0xaa, 0x9e, 0x3f, 0xc6, 0x28, 0xf6, 0x7b, 0x3e, 0x46, 0xe2, 0xa2, 0x15, 0x2b, 0x2f, 0x52, + 0x5f, 0x83, 0x0a, 0xf7, 0x87, 0x18, 0x73, 0x32, 0x0c, 0x1b, 0x65, 0x11, 0xdb, 0x52, 0x20, 0x6f, + 0xf4, 0x53, 0x09, 0x6e, 0x7d, 0x80, 0xc4, 0xc3, 0x68, 0xe3, 0x4d, 0x0a, 0x50, 0xa5, 0x15, 0xa8, + 0x44, 0x1b, 0xfb, 0x7d, 0x4a, 0xf8, 0x28, 0x42, 0x11, 0x48, 0xcd, 0x5a, 0x0a, 0xd4, 0x1e, 0xec, + 0x50, 0x9c, 0xd8, 0x39, 0x26, 0xca, 0x37, 0x61, 0x62, 0x7f, 0x3e, 0xd3, 0xee, 0xa6, 0x4c, 0x14, + 0x61, 0x74, 0xab, 0x46, 0x71, 0xb2, 0x30, 0x4c, 0x92, 0x38, 0x31, 0xc8, 0x93, 0xf2, 0x62, 0x42, + 0x4a, 0x3e, 0x9d, 0x56, 0x0c, 0x74, 0x2b, 0x09, 0xed, 0xe1, 0x52, 0x20, 0x59, 0xf9, 0xa3, 0x04, + 0xb5, 0x8f, 0xfd, 0xd8, 0xc1, 0x01, 0x19, 0xfb, 0x6c, 0x14, 0xa9, 0x2d, 0xa8, 0xa4, 0xa9, 0x6b, + 
0xfb, 0x9e, 0x20, 0xa7, 0xd2, 0xa9, 0xcf, 0x67, 0xda, 0x9e, 0x4c, 0xd2, 0x4c, 0xa5, 0x5b, 0xdb, + 0xe9, 0xfa, 0x43, 0xaf, 0x40, 0x67, 0x69, 0x85, 0xce, 0x10, 0x6e, 0x2f, 0xf8, 0xb1, 0x19, 0x4d, + 0x49, 0xab, 0x3e, 0x68, 0x5d, 0x5b, 0x28, 0xa7, 0xd9, 0xa9, 0x36, 0xf5, 0x1e, 0x12, 0x4e, 0x3a, + 0x8d, 0xf9, 0x4c, 0xab, 0xa7, 0x51, 0x14, 0x10, 0x75, 0xab, 0xb6, 0xd8, 0x7f, 0x4a, 0x57, 0x3c, + 0xf2, 0x09, 0x93, 0x6f, 0xf0, 0xbc, 0x3c, 0xf2, 0x09, 0xcb, 0x7b, 0x7c, 0x3c, 0x61, 0xc7, 0xdb, + 0x09, 0x93, 0x3f, 0x27, 0x6c, 0x3e, 0x82, 0xbd, 0x55, 0x94, 0x62, 0xca, 0x28, 0xab, 0x29, 0xa3, + 0x42, 0xd9, 0x23, 0x9c, 0x08, 0xde, 0x6a, 0x96, 0x58, 0xcb, 0x97, 0xf9, 0x1c, 0xea, 0x8f, 0xb3, + 0xbc, 0x43, 0x6f, 0x01, 0x7b, 0x0d, 0xde, 0xc6, 0xf4, 0x95, 0xc8, 0xdf, 0x29, 0x50, 0x49, 0xf0, + 0x3a, 0x53, 0x8e, 0xf1, 0xff, 0x28, 0x86, 0xeb, 0xeb, 0x32, 0xbb, 0x5d, 0xf9, 0x5f, 0xb7, 0xfb, + 0x4d, 0x01, 0x48, 0xab, 0x51, 0x90, 0xf4, 0x15, 0x54, 0x65, 0xca, 0x8b, 0xb2, 0x51, 0x6e, 0x52, + 0x36, 0xf7, 0xe6, 0x33, 0x4d, 0x2d, 0x94, 0x8d, 0xfc, 0x83, 0xa4, 0x35, 0xb3, 0xa6, 0x60, 0x4a, + 0xff, 0xb1, 0x60, 0xbe, 0x85, 0xdd, 0x5c, 0x3f, 0x11, 0xc1, 0xab, 0x50, 0x0e, 0x09, 0x1f, 0xc8, + 0xc7, 0x10, 0x6b, 0xf5, 0x04, 0x6a, 0xb2, 0x56, 0xf2, 0xfd, 0xa1, 0x6e, 0xa4, 0x9d, 0xca, 0xc8, + 0x3a, 0x95, 0xd1, 0xa6, 0xd3, 0xce, 0x2b, 0xf3, 0x99, 0x76, 0xa7, 0x50, 0x5f, 0xb2, 0x03, 0x54, + 0xdd, 0xa5, 0x27, 0xe9, 0xfe, 0x07, 0x05, 0xd4, 0xe2, 0x7f, 0x79, 0x6d, 0x08, 0x4f, 0xd6, 0x75, + 0xa9, 0xab, 0xa3, 0xb8, 0x41, 0x2b, 0x92, 0xb1, 0x50, 0xb8, 0xd3, 0x5d, 0xf4, 0xee, 0xcd, 0xb1, + 0xbc, 0x07, 0xb0, 0x6c, 0xf3, 0x32, 0x8c, 0xd7, 0x45, 0x45, 0xe6, 0xba, 0xff, 0x12, 0xec, 0x7d, + 0xea, 0x59, 0xb9, 0x03, 0xd2, 0xdf, 0x53, 0xd8, 0xeb, 0xa6, 0xd3, 0xc0, 0x66, 0x67, 0x06, 0xbc, + 0x24, 0xa7, 0x86, 0xc5, 0x85, 0x85, 0x27, 0x39, 0x49, 0x48, 0x0c, 0x2b, 0x33, 0x92, 0xe8, 0x8f, + 0xa0, 0x7e, 0x42, 0xdc, 0x33, 0xe4, 0x5d, 0x36, 0x1c, 0xfa, 0x7c, 0x88, 0x94, 0xaf, 0xf5, 0xd0, + 0x4c, 0xae, 0x93, 0x59, 0xc9, 0xda, 0xcd, 0x49, 0xf4, 0x27, 0xb0, 0x9f, 0x62, 0xb5, 0xdd, 0x33, + 0xca, 0x26, 0x01, 0x7a, 0x7d, 0xdc, 0x08, 0x78, 0x04, 0xbb, 0xa4, 0x68, 0x2a, 0x51, 0x57, 0xc5, + 0xfa, 0x3b, 0x70, 0x78, 0x25, 0x74, 0xdb, 0x49, 0x1e, 0x68, 0x2d, 0x29, 0xfa, 0x00, 0xea, 0x9f, + 0xe0, 0x39, 0xcf, 0x66, 0x18, 0x0b, 0xdd, 0xf1, 0xda, 0x68, 0xde, 0x85, 0xdb, 0x14, 0xcf, 0x79, + 0x32, 0x01, 0xd9, 0x11, 0xba, 0xe3, 0xb4, 0xf4, 0xf3, 0xff, 0xc3, 0x82, 0x5a, 0xb7, 0xaa, 0x34, + 0x85, 0x4e, 0x50, 0x3b, 0x4f, 0x7f, 0xbf, 0x68, 0x2a, 0xcf, 0x2e, 0x9a, 0xca, 0xdf, 0x17, 0x4d, + 0xe5, 0xc7, 0xcb, 0xe6, 0xd6, 0xb3, 0xcb, 0xe6, 0xd6, 0x5f, 0x97, 0xcd, 0xad, 0x2f, 0x3a, 0x7d, + 0x9f, 0x0f, 0x46, 0x8e, 0xe1, 0xb2, 0xa1, 0x29, 0xa7, 0xc5, 0xf4, 0x73, 0x3f, 0xf6, 0xce, 0xcc, + 0x73, 0x73, 0x31, 0x95, 0xde, 0xbf, 0x6a, 0x2c, 0xe5, 0xd3, 0x10, 0x63, 0xe7, 0x96, 0x48, 0xda, + 0xb7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x98, 0xb3, 0x9b, 0xc3, 0x0a, 0x00, 0x00, } func (m *ClientState) Marshal() (dAtA []byte, err error) { @@ -403,7 +889,14 @@ func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.Timestamp != 0 { i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x20 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x1a } if m.PublicKey != nil { { @@ -445,6 +938,13 @@ func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.NewDiversifier) > 
0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x2a + } if m.NewPublicKey != nil { { size, err := m.NewPublicKey.MarshalToSizedBuffer(dAtA[:i]) @@ -455,14 +955,19 @@ func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintSolomachine(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 } if m.Sequence != 0 { i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) @@ -603,6 +1108,402 @@ func (m *TimestampedSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SignBytes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignBytes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HeaderData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeaderData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x12 + } + if m.NewPubKey != nil { + { + size, err := m.NewPubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Connection != nil { + { + size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChannelStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChannelStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChannelStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Channel != nil { + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketCommitmentData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketCommitmentData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketCommitmentData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Commitment) > 0 { + i -= len(m.Commitment) + copy(dAtA[i:], m.Commitment) + i = 
encodeVarintSolomachine(dAtA, i, uint64(len(m.Commitment))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketAcknowledgementData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketAcknowledgementData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketAcknowledgementData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Acknowledgement) > 0 { + i -= len(m.Acknowledgement) + copy(dAtA[i:], m.Acknowledgement) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Acknowledgement))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketAcknowledgementAbsenseData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketAcknowledgementAbsenseData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketAcknowledgementAbsenseData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NextSequenceRecvData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NextSequenceRecvData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NextSequenceRecvData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextSeqRecv != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.NextSeqRecv)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintSolomachine(dAtA []byte, offset int, v uint64) int { offset -= sovSolomachine(v) base := offset @@ -646,6 +1547,10 @@ func (m *ConsensusState) Size() (n int) { l = m.PublicKey.Size() n += 1 + l + sovSolomachine(uint64(l)) } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } if m.Timestamp != 0 { n += 1 + sovSolomachine(uint64(m.Timestamp)) } @@ -661,6 +1566,9 @@ func (m *Header) Size() (n int) { if m.Sequence != 0 { n += 1 + sovSolomachine(uint64(m.Sequence)) } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } l = len(m.Signature) if l > 0 { n += 1 + l + sovSolomachine(uint64(l)) @@ -669,6 +1577,10 @@ func (m *Header) Size() (n int) { l = m.NewPublicKey.Size() n += 1 + l + sovSolomachine(uint64(l)) } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + 
} return n } @@ -729,6 +1641,177 @@ func (m *TimestampedSignature) Size() (n int) { return n } +func (m *SignBytes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *HeaderData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewPubKey != nil { + l = m.NewPubKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ClientStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConsensusStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConnectionStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Connection != nil { + l = m.Connection.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ChannelStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Channel != nil { + l = m.Channel.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketCommitmentData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Commitment) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketAcknowledgementData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Acknowledgement) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketAcknowledgementAbsenseData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *NextSequenceRecvData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NextSeqRecv != 0 { + n += 1 + sovSolomachine(uint64(m.NextSeqRecv)) + } + return n +} + func sovSolomachine(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -948,6 +2031,38 @@ func (m *ConsensusState) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } @@ -1039,6 +2154,25 @@ func (m *Header) Unmarshal(dAtA []byte) error { } } case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) } @@ -1072,7 +2206,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.Signature = []byte{} } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NewPublicKey", wireType) } @@ -1108,6 +2242,38 @@ func (m *Header) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSolomachine(dAtA[iNdEx:]) @@ -1535,6 +2701,1211 @@ func (m *TimestampedSignature) Unmarshal(dAtA []byte) error { } return nil } +func (m *SignBytes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignBytes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignBytes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeaderData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeaderData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeaderData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPubKey == nil { + m.NewPubKey = &types.PublicKey{} + } + if err := m.NewPubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types1.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types1.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Connection == nil { + m.Connection = &types2.ConnectionEnd{} + } + if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChannelStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChannelStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChannelStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Channel == nil { + m.Channel = &types3.Channel{} + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketCommitmentData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketCommitmentData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketCommitmentData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Commitment == nil { + m.Commitment = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketAcknowledgementData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketAcknowledgementData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketAcknowledgementData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Acknowledgement == nil { + m.Acknowledgement = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketAcknowledgementAbsenseData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketAcknowledgementAbsenseData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketAcknowledgementAbsenseData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NextSequenceRecvData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NextSequenceRecvData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NextSequenceRecvData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSeqRecv", wireType) + } + m.NextSeqRecv = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSeqRecv |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipSolomachine(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/ibc/light-clients/solomachine/types/solomachine_test.go b/x/ibc/light-clients/solomachine/types/solomachine_test.go index 84c27e6e8..dbfddd6f5 100644 --- a/x/ibc/light-clients/solomachine/types/solomachine_test.go +++ b/x/ibc/light-clients/solomachine/types/solomachine_test.go @@ -27,11 +27,11 @@ type SoloMachineTestSuite struct { } func (suite *SoloMachineTestSuite) SetupTest() { - suite.solomachine = ibctesting.NewSolomachine(suite.T(), "testingsolomachine") suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "testingsolomachine", "testing") suite.store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), exported.ClientTypeSoloMachine) bz, err := codec.MarshalAny(suite.chainA.Codec, suite.solomachine.ClientState()) diff --git a/x/ibc/light-clients/solomachine/types/update.go b/x/ibc/light-clients/solomachine/types/update.go index ef5fc3b39..d43bf6c86 100644 --- a/x/ibc/light-clients/solomachine/types/update.go +++ b/x/ibc/light-clients/solomachine/types/update.go @@ -23,7 +23,7 @@ func (cs ClientState) CheckHeaderAndUpdateState( ) } - if err := checkHeader(&cs, smHeader); err != nil { + if err := checkHeader(cdc, &cs, smHeader); err != nil { return nil, nil, err } @@ -32,7 +32,7 @@ func (cs ClientState) CheckHeaderAndUpdateState( } // checkHeader checks if the Solo Machine update signature is valid. 
-func checkHeader(clientState *ClientState, header *Header) error { +func checkHeader(cdc codec.BinaryMarshaler, clientState *ClientState, header *Header) error { // assert update sequence is current sequence if header.Sequence != clientState.ConsensusState.Sequence { return sdkerrors.Wrapf( @@ -42,7 +42,11 @@ func checkHeader(clientState *ClientState, header *Header) error { } // assert currently registered public key signed over the new public key with correct sequence - data := HeaderSignBytes(header) + data, err := HeaderSignBytes(cdc, header) + if err != nil { + return err + } + if err := VerifySignature(clientState.ConsensusState.GetPubKey(), data, header.Signature); err != nil { return sdkerrors.Wrap(ErrInvalidHeader, err.Error()) } @@ -54,8 +58,10 @@ func checkHeader(clientState *ClientState, header *Header) error { func update(clientState *ClientState, header *Header) (*ClientState, *ConsensusState) { consensusState := &ConsensusState{ // increment sequence number - Sequence: clientState.ConsensusState.Sequence + 1, - PublicKey: header.NewPublicKey, + Sequence: clientState.ConsensusState.Sequence + 1, + PublicKey: header.NewPublicKey, + Diversifier: header.NewDiversifier, + Timestamp: header.Timestamp, } clientState.ConsensusState = consensusState diff --git a/x/ibc/light-clients/solomachine/types/update_test.go b/x/ibc/light-clients/solomachine/types/update_test.go index 6baadf986..d9572c6dd 100644 --- a/x/ibc/light-clients/solomachine/types/update_test.go +++ b/x/ibc/light-clients/solomachine/types/update_test.go @@ -1,6 +1,7 @@ package types_test import ( + "github.com/cosmos/cosmos-sdk/std" sdk "github.com/cosmos/cosmos-sdk/types" ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/07-tendermint/types" "github.com/cosmos/cosmos-sdk/x/ibc/exported" @@ -69,9 +70,29 @@ func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() { cs := suite.solomachine.ClientState() h := suite.solomachine.CreateHeader() + publicKey, err := std.DefaultPublicKeyCodec{}.Encode(suite.solomachine.PublicKey) + suite.NoError(err) + + data := &types.HeaderData{ + NewPubKey: publicKey, + NewDiversifier: h.NewDiversifier, + } + + dataBz, err := suite.chainA.Codec.MarshalBinaryBare(data) + suite.Require().NoError(err) + // generate invalid signature - data := append(sdk.Uint64ToBigEndian(cs.ConsensusState.Sequence), suite.solomachine.PublicKey.Bytes()...) - sig, err := suite.solomachine.PrivateKey.Sign(data) + signBytes := &types.SignBytes{ + Sequence: cs.ConsensusState.Sequence, + Timestamp: suite.solomachine.Time, + Diversifier: suite.solomachine.Diversifier, + Data: dataBz, + } + + signBz, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + + sig, err := suite.solomachine.PrivateKey.Sign(signBz) suite.Require().NoError(err) h.Signature = sig diff --git a/x/ibc/testing/chain.go b/x/ibc/testing/chain.go index a3982b10e..652d7d7b5 100644 --- a/x/ibc/testing/chain.go +++ b/x/ibc/testing/chain.go @@ -372,6 +372,7 @@ func (chain *TestChain) GetFirstTestConnection(clientID, counterpartyClientID st } // ConstructMsgCreateClient constructs a message to create a new client state (tendermint or solomachine). +// NOTE: a solo machine client will be created with an empty diversifier. 
func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, clientID string, clientType string) *clienttypes.MsgCreateClient { var ( clientState exported.ClientState @@ -388,7 +389,7 @@ func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, client ) consensusState = counterparty.LastHeader.ConsensusState() case exported.ClientTypeSoloMachine: - solo := NewSolomachine(chain.t, clientID) + solo := NewSolomachine(chain.t, chain.Codec, clientID, "") clientState = solo.ClientState() consensusState = solo.ConsensusState() default: diff --git a/x/ibc/testing/solomachine.go b/x/ibc/testing/solomachine.go index aba04b3f8..3df45874b 100644 --- a/x/ibc/testing/solomachine.go +++ b/x/ibc/testing/solomachine.go @@ -7,8 +7,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/std" - sdk "github.com/cosmos/cosmos-sdk/types" clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/02-client/types" "github.com/cosmos/cosmos-sdk/x/ibc/exported" solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types" @@ -19,41 +19,48 @@ import ( type Solomachine struct { t *testing.T - ClientID string - PrivateKey crypto.PrivKey - PublicKey crypto.PubKey - Sequence uint64 - Time uint64 + cdc codec.BinaryMarshaler + ClientID string + PrivateKey crypto.PrivKey + PublicKey crypto.PubKey + Sequence uint64 + Time uint64 + Diversifier string } // NewSolomachine returns a new solomachine instance with a generated private/public // key pair and a sequence starting at 1. -func NewSolomachine(t *testing.T, clientID string) *Solomachine { +func NewSolomachine(t *testing.T, cdc codec.BinaryMarshaler, clientID, diversifier string) *Solomachine { privKey := ed25519.GenPrivKey() return &Solomachine{ - t: t, - ClientID: clientID, - PrivateKey: privKey, - PublicKey: privKey.PubKey(), - Sequence: 1, - Time: 10, + t: t, + cdc: cdc, + ClientID: clientID, + PrivateKey: privKey, + PublicKey: privKey.PubKey(), + Sequence: 1, + Time: 10, + Diversifier: diversifier, } } -// default usage does not allow update after governance proposal +// ClientState returns a new solo machine ClientState instance. Default usage does not allow update +// after governance proposal func (solo *Solomachine) ClientState() *solomachinetypes.ClientState { return solomachinetypes.NewClientState(solo.ConsensusState(), false) } +// ConsensusState returns a new solo machine ConsensusState instance func (solo *Solomachine) ConsensusState() *solomachinetypes.ConsensusState { publicKey, err := std.DefaultPublicKeyCodec{}.Encode(solo.PublicKey) require.NoError(solo.t, err) return &solomachinetypes.ConsensusState{ - Sequence: solo.Sequence, - PublicKey: publicKey, - Timestamp: solo.Time, + Sequence: solo.Sequence, + PublicKey: publicKey, + Diversifier: solo.Diversifier, + Timestamp: solo.Time, } } @@ -67,17 +74,37 @@ func (solo *Solomachine) GetHeight() exported.Height { func (solo *Solomachine) CreateHeader() *solomachinetypes.Header { // generate new private key and signature for header newPrivKey := ed25519.GenPrivKey() - data := append(sdk.Uint64ToBigEndian(solo.Sequence), newPrivKey.PubKey().Bytes()...) 
- signature, err := solo.PrivateKey.Sign(data) - require.NoError(solo.t, err) publicKey, err := std.DefaultPublicKeyCodec{}.Encode(newPrivKey.PubKey()) require.NoError(solo.t, err) + data := &solomachinetypes.HeaderData{ + NewPubKey: publicKey, + NewDiversifier: solo.Diversifier, + } + + dataBz, err := solo.cdc.MarshalBinaryBare(data) + require.NoError(solo.t, err) + + signBytes := &solomachinetypes.SignBytes{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Diversifier: solo.Diversifier, + Data: dataBz, + } + + signBz, err := solo.cdc.MarshalBinaryBare(signBytes) + require.NoError(solo.t, err) + + signature, err := solo.PrivateKey.Sign(signBz) + require.NoError(solo.t, err) + header := &solomachinetypes.Header{ - Sequence: solo.Sequence, - Signature: signature, - NewPublicKey: publicKey, + Sequence: solo.Sequence, + Timestamp: solo.Time, + Signature: signature, + NewPublicKey: publicKey, + NewDiversifier: solo.Diversifier, } // assumes successful header update @@ -94,7 +121,17 @@ func (solo *Solomachine) CreateMisbehaviour() *solomachinetypes.Misbehaviour { dataOne := []byte("DATA ONE") dataTwo := []byte("DATA TWO") - sig, err := solo.PrivateKey.Sign(append(sdk.Uint64ToBigEndian(solo.Sequence), dataOne...)) + signBytes := &solomachinetypes.SignBytes{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Diversifier: solo.Diversifier, + Data: dataOne, + } + + signBz, err := solo.cdc.MarshalBinaryBare(signBytes) + require.NoError(solo.t, err) + + sig, err := solo.PrivateKey.Sign(signBz) require.NoError(solo.t, err) signatureOne := solomachinetypes.SignatureAndData{ @@ -102,7 +139,17 @@ func (solo *Solomachine) CreateMisbehaviour() *solomachinetypes.Misbehaviour { Data: dataOne, } - sig, err = solo.PrivateKey.Sign(append(sdk.Uint64ToBigEndian(solo.Sequence), dataTwo...)) + signBytes = &solomachinetypes.SignBytes{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Diversifier: solo.Diversifier, + Data: dataTwo, + } + + signBz, err = solo.cdc.MarshalBinaryBare(signBytes) + require.NoError(solo.t, err) + + sig, err = solo.PrivateKey.Sign(signBz) require.NoError(solo.t, err) signatureTwo := solomachinetypes.SignatureAndData{ diff --git a/x/mint/types/query.pb.gw.go b/x/mint/types/query.pb.gw.go index c70c3e60b..644c2782e 100644 --- a/x/mint/types/query.pb.gw.go +++ b/x/mint/types/query.pb.gw.go @@ -88,7 +88,6 @@ func local_request_Query_AnnualProvisions_0(ctx context.Context, marshaler runti // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/params/types/proposal/query.pb.gw.go b/x/params/types/proposal/query.pb.gw.go index c7fab13fc..05f80279a 100644 --- a/x/params/types/proposal/query.pb.gw.go +++ b/x/params/types/proposal/query.pb.gw.go @@ -70,7 +70,6 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. 
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/slashing/types/query.pb.gw.go b/x/slashing/types/query.pb.gw.go index 5ac329602..77c4688d9 100644 --- a/x/slashing/types/query.pb.gw.go +++ b/x/slashing/types/query.pb.gw.go @@ -142,7 +142,6 @@ func local_request_Query_SigningInfos_0(ctx context.Context, marshaler runtime.M // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/staking/types/query.pb.gw.go b/x/staking/types/query.pb.gw.go index 0c4333302..8995f7645 100644 --- a/x/staking/types/query.pb.gw.go +++ b/x/staking/types/query.pb.gw.go @@ -874,7 +874,6 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_Validators_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { diff --git a/x/upgrade/types/query.pb.gw.go b/x/upgrade/types/query.pb.gw.go index 6d311b577..004a27419 100644 --- a/x/upgrade/types/query.pb.gw.go +++ b/x/upgrade/types/query.pb.gw.go @@ -106,7 +106,6 @@ func local_request_Query_AppliedPlan_0(ctx context.Context, marshaler runtime.Ma // RegisterQueryHandlerServer registers the http handlers for service Query to "mux". // UnaryRPC :call QueryServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { mux.Handle("GET", pattern_Query_CurrentPlan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
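To summarize the format change that the generated code and testing helpers above implement: a solo machine signature is now produced over a proto-encoded SignBytes envelope (sequence, timestamp, diversifier, data) rather than the old append(BigEndian(sequence), data...) concatenation. Below is a condensed, illustrative sketch of building a header-update signature this way; the helper name, package name, and argument list are assumptions for illustration only, while SignBytes, HeaderData, std.DefaultPublicKeyCodec, MarshalBinaryBare, and PrivKey.Sign are taken directly from the diff.

package sketch

import (
	"github.com/tendermint/tendermint/crypto"

	"github.com/cosmos/cosmos-sdk/codec"
	"github.com/cosmos/cosmos-sdk/std"
	solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/solomachine/types"
)

// signHeaderUpdate (illustrative, not part of this patch) shows the new signature
// format: the currently registered key signs a proto-encoded SignBytes whose Data
// field wraps a HeaderData announcing the next public key and diversifier. The
// returned bytes are what would be placed in Header.Signature.
func signHeaderUpdate(
	cdc codec.BinaryMarshaler,
	privKey crypto.PrivKey,
	sequence, timestamp uint64,
	diversifier, newDiversifier string,
	newPubKey crypto.PubKey,
) ([]byte, error) {
	// encode the new public key with the same codec used by the testing helper
	pk, err := std.DefaultPublicKeyCodec{}.Encode(newPubKey)
	if err != nil {
		return nil, err
	}

	// inner payload: what the header commits to (new key and diversifier)
	dataBz, err := cdc.MarshalBinaryBare(&solomachinetypes.HeaderData{
		NewPubKey:      pk,
		NewDiversifier: newDiversifier,
	})
	if err != nil {
		return nil, err
	}

	// outer envelope: sequence, timestamp and diversifier bind the payload to the
	// client's current state, replacing the old big-endian sequence prefix format
	signBz, err := cdc.MarshalBinaryBare(&solomachinetypes.SignBytes{
		Sequence:    sequence,
		Timestamp:   timestamp,
		Diversifier: diversifier,
		Data:        dataBz,
	})
	if err != nil {
		return nil, err
	}

	return privKey.Sign(signBz)
}

The CreateMisbehaviour helper in the diff follows the same pattern: each of the two conflicting data payloads is wrapped in a SignBytes carrying the same sequence before signing, which is what allows the client to treat two valid signatures over one sequence as misbehaviour.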