Split out quic- and udp-client definitions (#28762)

* Move ConnectionCache back to solana-client, and duplicate ThinClient, TpuClient there

* Dedupe thin_client modules

* Dedupe tpu_client modules

* Move TpuClient to TpuConnectionCache

* Move ThinClient to TpuConnectionCache

* Move TpuConnection and quic/udp trait implementations back to solana-client

* Remove enum_dispatch from solana-tpu-client

* Move udp-client to its own crate

* Move quic-client to its own crate
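For downstream crates, the visible effect is an import-path migration; here is a minimal before/after sketch based on the diffs below (the exact re-export surface is whatever each crate's lib.rs exposes):

// Before this change, the cache and clients lived under solana-tpu-client:
// use solana_tpu_client::{
//     connection_cache::ConnectionCache,
//     tpu_client::{TpuClient, TpuClientConfig},
// };

// After it, the user-facing facades move back to solana-client, while the
// QUIC and UDP transport implementations live in their own crates.
use solana_client::{
    connection_cache::ConnectionCache,
    thin_client::ThinClient,
    tpu_client::{TpuClient, TpuClientConfig},
};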
Tyera 2022-11-18 12:21:45 -07:00 committed by GitHub
parent dcfb73f664
commit c32377b5af
76 changed files with 2620 additions and 1257 deletions

Cargo.lock

@@ -4637,6 +4637,7 @@ dependencies = [
"log",
"rand 0.7.3",
"rayon",
"solana-client",
"solana-core",
"solana-gossip",
"solana-ledger",
@@ -4689,7 +4690,6 @@ dependencies = [
"solana-runtime",
"solana-sdk 1.15.0",
"solana-send-transaction-service",
"solana-tpu-client",
"tarpc",
"tokio",
"tokio-serde",
@@ -4915,6 +4915,7 @@ dependencies = [
"solana-clap-utils",
"solana-cli-config",
"solana-cli-output",
"solana-client",
"solana-config-program",
"solana-faucet",
"solana-logger 1.15.0",
@@ -4983,15 +4984,33 @@ dependencies = [
name = "solana-client"
version = "1.15.0"
dependencies = [
"async-trait",
"bincode",
"enum_dispatch",
"futures 0.3.24",
"futures-util",
"indexmap",
"indicatif",
"log",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rayon",
"solana-logger 1.15.0",
"solana-measure",
"solana-metrics",
"solana-net-utils",
"solana-pubsub-client",
"solana-quic-client",
"solana-rpc-client",
"solana-rpc-client-api",
"solana-rpc-client-nonce-utils",
"solana-sdk 1.15.0",
"solana-streamer",
"solana-thin-client",
"solana-tpu-client",
"solana-udp-client",
"thiserror",
"tokio",
]
[[package]]
@@ -5076,6 +5095,7 @@ dependencies = [
"serial_test",
"solana-address-lookup-table-program",
"solana-bloom",
"solana-client",
"solana-entry",
"solana-frozen-abi 1.15.0",
"solana-frozen-abi-macro 1.15.0",
@@ -5125,6 +5145,7 @@ dependencies = [
"serde",
"serial_test",
"solana-bench-tps",
"solana-client",
"solana-core",
"solana-faucet",
"solana-gossip",
@@ -5387,6 +5408,7 @@ dependencies = [
"serial_test",
"solana-bloom",
"solana-clap-utils",
"solana-client",
"solana-entry",
"solana-frozen-abi 1.15.0",
"solana-frozen-abi-macro 1.15.0",
@@ -5571,6 +5593,7 @@ dependencies = [
"rand 0.7.3",
"rayon",
"serial_test",
"solana-client",
"solana-config-program",
"solana-core",
"solana-download-utils",
@@ -5959,12 +5982,28 @@ dependencies = [
name = "solana-quic-client"
version = "1.15.0"
dependencies = [
"async-mutex",
"async-trait",
"crossbeam-channel",
"futures 0.3.24",
"itertools",
"lazy_static",
"log",
"quinn",
"quinn-proto",
"quinn-udp",
"rustls 0.20.6",
"solana-logger 1.15.0",
"solana-measure",
"solana-metrics",
"solana-net-utils",
"solana-perf",
"solana-rpc-client-api",
"solana-sdk 1.15.0",
"solana-streamer",
"solana-tpu-client",
"thiserror",
"tokio",
]
[[package]]
@@ -6019,6 +6058,7 @@ dependencies = [
"soketto",
"solana-account-decoder",
"solana-address-lookup-table-program",
"solana-client",
"solana-entry",
"solana-faucet",
"solana-gossip",
@@ -6128,6 +6168,7 @@ dependencies = [
"serde",
"serde_json",
"solana-account-decoder",
"solana-client",
"solana-logger 1.15.0",
"solana-pubsub-client",
"solana-rpc",
@@ -6345,6 +6386,7 @@ version = "1.15.0"
dependencies = [
"crossbeam-channel",
"log",
"solana-client",
"solana-logger 1.15.0",
"solana-measure",
"solana-metrics",
@@ -6566,35 +6608,23 @@ dependencies = [
name = "solana-tpu-client"
version = "1.15.0"
dependencies = [
"async-mutex",
"async-trait",
"bincode",
"crossbeam-channel",
"enum_dispatch",
"futures 0.3.24",
"futures-util",
"indexmap",
"indicatif",
"itertools",
"lazy_static",
"log",
"quinn",
"quinn-proto",
"quinn-udp",
"rand 0.7.3",
"rand_chacha 0.2.2",
"rayon",
"rustls 0.20.6",
"solana-logger 1.15.0",
"solana-measure",
"solana-metrics",
"solana-net-utils",
"solana-perf",
"solana-pubsub-client",
"solana-rpc-client",
"solana-rpc-client-api",
"solana-sdk 1.15.0",
"solana-streamer",
"thiserror",
"tokio",
]
@@ -6654,9 +6684,13 @@ dependencies = [
name = "solana-udp-client"
version = "1.15.0"
dependencies = [
"async-trait",
"solana-net-utils",
"solana-sdk 1.15.0",
"solana-streamer",
"solana-tpu-client",
"thiserror",
"tokio",
]
[[package]]


@@ -14,6 +14,7 @@ crossbeam-channel = "0.5"
log = "0.4.17"
rand = "0.7.0"
rayon = "1.5.3"
solana-client = { path = "../client", version = "=1.15.0" }
solana-core = { path = "../core", version = "=1.15.0" }
solana-gossip = { path = "../gossip", version = "=1.15.0" }
solana-ledger = { path = "../ledger", version = "=1.15.0" }


@@ -5,6 +5,7 @@ use {
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_client::connection_cache::ConnectionCache,
solana_core::banking_stage::BankingStage,
solana_gossip::cluster_info::{ClusterInfo, Node},
solana_ledger::{
@@ -28,7 +29,7 @@ use {
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE},
solana_tpu_client::tpu_connection_cache::DEFAULT_TPU_CONNECTION_POOL_SIZE,
std::{
sync::{atomic::Ordering, Arc, RwLock},
thread::sleep,


@@ -18,7 +18,6 @@ solana-client = { path = "../client", version = "=1.15.0" }
solana-runtime = { path = "../runtime", version = "=1.15.0" }
solana-sdk = { path = "../sdk", version = "=1.15.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.15.0" }
solana-tpu-client = { path = "../tpu-client", version = "=1.15.0", default-features = false }
tarpc = { version = "0.29.0", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }


@@ -7,6 +7,7 @@ use {
BanksTransactionResultWithSimulation, TransactionConfirmationStatus, TransactionMetadata,
TransactionSimulationDetails, TransactionStatus,
},
solana_client::connection_cache::ConnectionCache,
solana_runtime::{
bank::{Bank, TransactionExecutionResult, TransactionSimulationResult},
bank_forks::BankForks,
@@ -28,7 +29,6 @@ use {
send_transaction_service::{SendTransactionService, TransactionInfo},
tpu_info::NullTpuInfo,
},
solana_tpu_client::connection_cache::ConnectionCache,
std::{
convert::TryFrom,
io,


@@ -3,8 +3,8 @@
use {
crate::banks_server::start_tcp_server,
futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
solana_client::connection_cache::ConnectionCache,
solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_tpu_client::connection_cache::ConnectionCache,
std::{
net::SocketAddr,
sync::{


@@ -1,5 +1,6 @@
use {
crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result},
solana_client::thin_client::ThinClient,
solana_sdk::{
account::Account,
client::{AsyncClient, Client, SyncClient},
@@ -11,7 +12,6 @@ use {
signature::Signature,
transaction::Transaction,
},
solana_thin_client::thin_client::ThinClient,
};
impl BenchTpsClient for ThinClient {


@@ -1,10 +1,10 @@
use {
crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result},
solana_client::tpu_client::TpuClient,
solana_sdk::{
account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash,
message::Message, pubkey::Pubkey, signature::Signature, transaction::Transaction,
},
solana_tpu_client::tpu_client::TpuClient,
};
impl BenchTpsClient for TpuClient {


@@ -8,7 +8,9 @@ use {
pubkey::Pubkey,
signature::{read_keypair_file, Keypair},
},
solana_tpu_client::connection_cache::{DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_USE_QUIC},
solana_tpu_client::tpu_connection_cache::{
DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_USE_QUIC,
},
std::{net::SocketAddr, process::exit, time::Duration},
};


@@ -10,6 +10,11 @@ use {
keypairs::get_keypairs,
send_batch::{generate_durable_nonce_accounts, generate_keypairs},
},
solana_client::{
connection_cache::ConnectionCache,
thin_client::ThinClient,
tpu_client::{TpuClient, TpuClientConfig},
},
solana_genesis::Base64Account,
solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
solana_rpc_client::rpc_client::RpcClient,
@@ -18,11 +23,6 @@ use {
system_program,
},
solana_streamer::socket::SocketAddrSpace,
solana_thin_client::thin_client::ThinClient,
solana_tpu_client::{
connection_cache::ConnectionCache,
tpu_client::{TpuClient, TpuClientConfig},
},
std::{
collections::HashMap, fs::File, io::prelude::*, net::SocketAddr, path::Path, process::exit,
sync::Arc,


@@ -8,6 +8,11 @@ use {
send_batch::generate_durable_nonce_accounts,
spl_convert::FromOtherSolana,
},
solana_client::{
connection_cache::ConnectionCache,
thin_client::ThinClient,
tpu_client::{TpuClient, TpuClientConfig},
},
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{
@@ -25,11 +30,6 @@
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidatorGenesis,
solana_thin_client::thin_client::ThinClient,
solana_tpu_client::{
connection_cache::ConnectionCache,
tpu_client::{TpuClient, TpuClientConfig},
},
std::{sync::Arc, time::Duration},
};


@@ -40,6 +40,7 @@ solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.15.
solana-clap-utils = { path = "../clap-utils", version = "=1.15.0" }
solana-cli-config = { path = "../cli-config", version = "=1.15.0" }
solana-cli-output = { path = "../cli-output", version = "=1.15.0" }
solana-client = { path = "../client", version = "=1.15.0" }
solana-config-program = { path = "../programs/config", version = "=1.15.0" }
solana-faucet = { path = "../faucet", version = "=1.15.0" }
solana-logger = { path = "../logger", version = "=1.15.0" }


@@ -31,7 +31,7 @@ use {
stake::{instruction::LockupArgs, state::Lockup},
transaction::{TransactionError, VersionedTransaction},
},
solana_tpu_client::connection_cache::DEFAULT_TPU_ENABLE_UDP,
solana_tpu_client::tpu_connection_cache::DEFAULT_TPU_ENABLE_UDP,
solana_vote_program::vote_state::VoteAuthorize,
std::{collections::HashMap, error, io::stdout, str::FromStr, sync::Arc, time::Duration},
thiserror::Error,


@@ -17,6 +17,10 @@ use {
CliUpgradeableBuffer, CliUpgradeableBuffers, CliUpgradeableProgram,
CliUpgradeableProgramClosed, CliUpgradeablePrograms,
},
solana_client::{
connection_cache::ConnectionCache,
tpu_client::{TpuClient, TpuClientConfig},
},
solana_program_runtime::invoke_context::InvokeContext,
solana_rbpf::{
elf::Executable,
@@ -48,10 +52,6 @@ use {
transaction::{Transaction, TransactionError},
transaction_context::TransactionContext,
},
solana_tpu_client::{
connection_cache::ConnectionCache,
tpu_client::{TpuClient, TpuClientConfig},
},
std::{
fs::File,
io::{Read, Write},


@@ -10,17 +10,35 @@ license = "Apache-2.0"
edition = "2021"
[dependencies]
async-trait = "0.1.57"
bincode = "1.3.3"
enum_dispatch = "0.3.8"
futures = "0.3"
futures-util = "0.3.21"
indexmap = "1.9.1"
indicatif = { version = "0.17.1" }
log = "0.4.17"
rand = "0.7.0"
rayon = "1.5.3"
solana-measure = { path = "../measure", version = "=1.15.0" }
solana-metrics = { path = "../metrics", version = "=1.15.0" }
solana-net-utils = { path = "../net-utils", version = "=1.15.0" }
solana-pubsub-client = { path = "../pubsub-client", version = "=1.15.0" }
solana-quic-client = { path = "../quic-client", version = "=1.15.0" }
solana-rpc-client = { path = "../rpc-client", version = "=1.15.0" }
solana-rpc-client-api = { path = "../rpc-client-api", version = "=1.15.0" }
solana-rpc-client-nonce-utils = { path = "../rpc-client-nonce-utils", version = "=1.15.0" }
solana-sdk = { path = "../sdk", version = "=1.15.0" }
solana-streamer = { path = "../streamer", version = "=1.15.0" }
solana-thin-client = { path = "../thin-client", version = "=1.15.0" }
solana-tpu-client = { path = "../tpu-client", version = "=1.15.0" }
solana-udp-client = { path = "../udp-client", version = "=1.15.0" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
rand_chacha = "0.2.2"
solana-logger = { path = "../logger", version = "=1.15.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,19 +1,16 @@
pub use crate::tpu_connection_cache::{
pub use solana_tpu_client::tpu_connection_cache::{
DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC,
};
use {
crate::{
connection_cache_stats::{ConnectionCacheStats, CONNECTION_STAT_SUBMISSION_INTERVAL},
nonblocking::{
quic_client::{QuicClient, QuicClientCertificate, QuicLazyInitializedEndpoint},
tpu_connection::NonblockingConnection,
},
tpu_connection::BlockingConnection,
tpu_connection_cache::MAX_CONNECTIONS,
nonblocking::tpu_connection::NonblockingConnection, tpu_connection::BlockingConnection,
},
indexmap::map::{Entry, IndexMap},
rand::{thread_rng, Rng},
solana_measure::measure::Measure,
solana_quic_client::nonblocking::quic_client::{
QuicClient, QuicClientCertificate, QuicLazyInitializedEndpoint,
},
solana_sdk::{
pubkey::Pubkey, quic::QUIC_PORT_OFFSET, signature::Keypair, timing::AtomicInterval,
},
@@ -22,6 +19,10 @@ use {
streamer::StakedNodes,
tls_certificates::new_self_signed_tls_certificate_chain,
},
solana_tpu_client::{
connection_cache_stats::{ConnectionCacheStats, CONNECTION_STAT_SUBMISSION_INTERVAL},
tpu_connection_cache::MAX_CONNECTIONS,
},
std::{
error::Error,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
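Call sites are unchanged by the reshuffle; a minimal usage sketch, assuming only ConnectionCache::default() and the get_connection() that ThinClient uses further below (the TPU address is a placeholder):

use solana_client::connection_cache::ConnectionCache;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    // default() picks the default transport (QUIC or UDP per DEFAULT_TPU_USE_QUIC).
    let cache = ConnectionCache::default();
    // Placeholder TPU address, for illustration only.
    let tpu_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8003);
    let _conn = cache.get_connection(&tpu_addr);
}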


@@ -1,6 +1,16 @@
#![allow(clippy::integer_arithmetic)]
pub mod connection_cache;
pub mod nonblocking;
pub mod quic_client;
pub mod thin_client;
pub mod tpu_client;
pub mod tpu_connection;
pub mod transaction_executor;
pub mod udp_client;
#[macro_use]
extern crate solana_metrics;
pub use solana_rpc_client::mock_sender_for_cli;
@@ -12,50 +22,6 @@ pub mod client_error {
reqwest, Error as ClientError, ErrorKind as ClientErrorKind, Result,
};
}
pub mod connection_cache {
pub use solana_tpu_client::connection_cache::*;
}
pub mod nonblocking {
pub mod blockhash_query {
pub use solana_rpc_client_nonce_utils::nonblocking::blockhash_query::*;
}
/// Durable transaction nonce helpers.
pub mod nonce_utils {
pub use solana_rpc_client_nonce_utils::nonblocking::*;
}
pub mod pubsub_client {
pub use solana_pubsub_client::nonblocking::pubsub_client::*;
}
/// Simple nonblocking client that connects to a given UDP port with the QUIC protocol
/// and provides an interface for sending transactions which is restricted by the
/// server's flow control.
pub mod quic_client {
pub use solana_tpu_client::nonblocking::quic_client::*;
}
/// Asynchronous communication with a Solana node over RPC.
///
/// Software that interacts with the Solana blockchain, whether querying its
/// state or submitting transactions, communicates with a Solana node over
/// [JSON-RPC], using the [`RpcClient`] type.
///
/// [JSON-RPC]: https://www.jsonrpc.org/specification
/// [`RpcClient`]: crate::nonblocking::rpc_client::RpcClient
pub mod rpc_client {
pub use solana_rpc_client::nonblocking::rpc_client::*;
}
pub mod tpu_client {
pub use solana_tpu_client::nonblocking::tpu_client::*;
}
/// Trait defining async send functions, to be used for UDP or QUIC sending
pub mod tpu_connection {
pub use solana_tpu_client::nonblocking::tpu_connection::*;
}
/// Simple UDP client that communicates with the given UDP port and provides
/// an interface for sending transactions
pub mod udp_client {
pub use solana_tpu_client::nonblocking::udp_client::*;
}
}
/// Durable transaction nonce helpers.
pub mod nonce_utils {
pub use solana_rpc_client_nonce_utils::*;
@@ -63,11 +29,6 @@ pub mod nonce_utils {
pub mod pubsub_client {
pub use solana_pubsub_client::pubsub_client::*;
}
/// Simple client that connects to a given UDP port with the QUIC protocol and provides
/// an interface for sending transactions which is restricted by the server's flow control.
pub mod quic_client {
pub use solana_tpu_client::quic_client::*;
}
/// Communication with a Solana node over RPC.
///
/// Software that interacts with the Solana blockchain, whether querying its
@@ -102,21 +63,3 @@ pub mod rpc_response {
pub mod rpc_sender {
pub use solana_rpc_client::rpc_sender::*;
}
/// The `thin_client` module provides a client-side object that interfaces with
/// a server-side TPU. Client code should use this object instead of writing
/// messages to the network directly. The binary encoding of its messages is
/// unstable and may change in future releases.
pub mod thin_client {
pub use solana_thin_client::thin_client::*;
}
pub mod tpu_client {
pub use solana_tpu_client::tpu_client::*;
}
pub mod tpu_connection {
pub use solana_tpu_client::tpu_connection::*;
}
/// Simple TPU client that communicates with the given UDP port and provides
/// an interface for sending transactions
pub mod udp_client {
pub use solana_tpu_client::udp_client::*;
}


@@ -0,0 +1,26 @@
pub mod quic_client;
pub mod tpu_client;
pub mod tpu_connection;
pub mod udp_client;
pub mod blockhash_query {
pub use solana_rpc_client_nonce_utils::nonblocking::blockhash_query::*;
}
/// Durable transaction nonce helpers.
pub mod nonce_utils {
pub use solana_rpc_client_nonce_utils::nonblocking::*;
}
pub mod pubsub_client {
pub use solana_pubsub_client::nonblocking::pubsub_client::*;
}
/// Asynchronous communication with a Solana node over RPC.
///
/// Software that interacts with the Solana blockchain, whether querying its
/// state or submitting transactions, communicates with a Solana node over
/// [JSON-RPC], using the [`RpcClient`] type.
///
/// [JSON-RPC]: https://www.jsonrpc.org/specification
/// [`RpcClient`]: crate::nonblocking::rpc_client::RpcClient
pub mod rpc_client {
pub use solana_rpc_client::nonblocking::rpc_client::*;
}


@@ -0,0 +1,59 @@
//! Simple nonblocking client that connects to a given UDP port with the QUIC protocol
//! and provides an interface for sending transactions which is restricted by the
//! server's flow control.
pub use solana_quic_client::nonblocking::quic_client::{
QuicClient, QuicClientCertificate, QuicError, QuicLazyInitializedEndpoint, QuicTpuConnection,
};
use {
crate::nonblocking::tpu_connection::TpuConnection,
async_trait::async_trait,
log::*,
solana_sdk::transport::Result as TransportResult,
solana_tpu_client::tpu_connection::ClientStats,
std::{net::SocketAddr, sync::Arc},
};
#[async_trait]
impl TpuConnection for QuicTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
self.client.tpu_addr()
}
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let stats = ClientStats::default();
let len = buffers.len();
let res = self
.client
.send_batch(buffers, &stats, self.connection_stats.clone())
.await;
self.connection_stats
.add_client_stats(&stats, len, res.is_ok());
res?;
Ok(())
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let stats = Arc::new(ClientStats::default());
let send_buffer =
self.client
.send_buffer(wire_transaction, &stats, self.connection_stats.clone());
if let Err(e) = send_buffer.await {
warn!(
"Failed to send transaction async to {}, error: {:?} ",
self.tpu_addr(),
e
);
datapoint_warn!("send-wire-async", ("failure", 1, i64),);
self.connection_stats.add_client_stats(&stats, 1, false);
} else {
self.connection_stats.add_client_stats(&stats, 1, true);
}
Ok(())
}
}


@@ -0,0 +1,343 @@
pub use solana_tpu_client::nonblocking::tpu_client::{LeaderTpuService, TpuSenderError};
use {
crate::{
connection_cache::ConnectionCache,
nonblocking::tpu_connection::TpuConnection,
tpu_client::{TpuClientConfig, MAX_FANOUT_SLOTS},
},
bincode::serialize,
futures_util::future::join_all,
solana_rpc_client::{nonblocking::rpc_client::RpcClient, spinner},
solana_rpc_client_api::request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
solana_sdk::{
message::Message,
signers::Signers,
transaction::{Transaction, TransactionError},
transport::{Result as TransportResult, TransportError},
},
solana_tpu_client::{
nonblocking::tpu_client::temporary_pub::*,
tpu_client::temporary_pub::{SEND_TRANSACTION_INTERVAL, TRANSACTION_RESEND_INTERVAL},
},
std::{
collections::HashMap,
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
},
tokio::time::{sleep, Duration, Instant},
};
/// Client which sends transactions directly to the current leader's TPU port over UDP.
/// The client uses RPC to determine the current leader and fetch node contact info
pub struct TpuClient {
fanout_slots: u64,
leader_tpu_service: LeaderTpuService,
exit: Arc<AtomicBool>,
rpc_client: Arc<RpcClient>,
connection_cache: Arc<ConnectionCache>,
}
async fn send_wire_transaction_to_addr(
connection_cache: &ConnectionCache,
addr: &SocketAddr,
wire_transaction: Vec<u8>,
) -> TransportResult<()> {
let conn = connection_cache.get_nonblocking_connection(addr);
conn.send_wire_transaction(wire_transaction.clone()).await
}
async fn send_wire_transaction_batch_to_addr(
connection_cache: &ConnectionCache,
addr: &SocketAddr,
wire_transactions: &[Vec<u8>],
) -> TransportResult<()> {
let conn = connection_cache.get_nonblocking_connection(addr);
conn.send_wire_transaction_batch(wire_transactions).await
}
impl TpuClient {
/// Serialize and send transaction to the current and upcoming leader TPUs according to fanout
/// size
pub async fn send_transaction(&self, transaction: &Transaction) -> bool {
let wire_transaction = serialize(transaction).expect("serialization should succeed");
self.send_wire_transaction(wire_transaction).await
}
/// Send a wire transaction to the current and upcoming leader TPUs according to fanout size
pub async fn send_wire_transaction(&self, wire_transaction: Vec<u8>) -> bool {
self.try_send_wire_transaction(wire_transaction)
.await
.is_ok()
}
/// Serialize and send transaction to the current and upcoming leader TPUs according to fanout
/// size
/// Returns the last error if all sends fail
pub async fn try_send_transaction(&self, transaction: &Transaction) -> TransportResult<()> {
let wire_transaction = serialize(transaction).expect("serialization should succeed");
self.try_send_wire_transaction(wire_transaction).await
}
/// Send a wire transaction to the current and upcoming leader TPUs according to fanout size
/// Returns the last error if all sends fail
pub async fn try_send_wire_transaction(
&self,
wire_transaction: Vec<u8>,
) -> TransportResult<()> {
let leaders = self
.leader_tpu_service
.leader_tpu_sockets(self.fanout_slots);
let futures = leaders
.iter()
.map(|addr| {
send_wire_transaction_to_addr(
&self.connection_cache,
addr,
wire_transaction.clone(),
)
})
.collect::<Vec<_>>();
let results: Vec<TransportResult<()>> = join_all(futures).await;
let mut last_error: Option<TransportError> = None;
let mut some_success = false;
for result in results {
if let Err(e) = result {
if last_error.is_none() {
last_error = Some(e);
}
} else {
some_success = true;
}
}
if !some_success {
Err(if let Some(err) = last_error {
err
} else {
std::io::Error::new(std::io::ErrorKind::Other, "No sends attempted").into()
})
} else {
Ok(())
}
}
/// Send a batch of wire transactions to the current and upcoming leader TPUs according to
/// fanout size
/// Returns the last error if all sends fail
pub async fn try_send_wire_transaction_batch(
&self,
wire_transactions: Vec<Vec<u8>>,
) -> TransportResult<()> {
let leaders = self
.leader_tpu_service
.leader_tpu_sockets(self.fanout_slots);
let futures = leaders
.iter()
.map(|addr| {
send_wire_transaction_batch_to_addr(
&self.connection_cache,
addr,
&wire_transactions,
)
})
.collect::<Vec<_>>();
let results: Vec<TransportResult<()>> = join_all(futures).await;
let mut last_error: Option<TransportError> = None;
let mut some_success = false;
for result in results {
if let Err(e) = result {
if last_error.is_none() {
last_error = Some(e);
}
} else {
some_success = true;
}
}
if !some_success {
Err(if let Some(err) = last_error {
err
} else {
std::io::Error::new(std::io::ErrorKind::Other, "No sends attempted").into()
})
} else {
Ok(())
}
}
/// Create a new client that disconnects when dropped
pub async fn new(
rpc_client: Arc<RpcClient>,
websocket_url: &str,
config: TpuClientConfig,
) -> Result<Self> {
let connection_cache = Arc::new(ConnectionCache::default());
Self::new_with_connection_cache(rpc_client, websocket_url, config, connection_cache).await
}
/// Create a new client that disconnects when dropped
pub async fn new_with_connection_cache(
rpc_client: Arc<RpcClient>,
websocket_url: &str,
config: TpuClientConfig,
connection_cache: Arc<ConnectionCache>,
) -> Result<Self> {
let exit = Arc::new(AtomicBool::new(false));
let leader_tpu_service =
LeaderTpuService::new(rpc_client.clone(), websocket_url, exit.clone()).await?;
Ok(Self {
fanout_slots: config.fanout_slots.clamp(1, MAX_FANOUT_SLOTS),
leader_tpu_service,
exit,
rpc_client,
connection_cache,
})
}
pub async fn send_and_confirm_messages_with_spinner<T: Signers>(
&self,
messages: &[Message],
signers: &T,
) -> Result<Vec<Option<TransactionError>>> {
let mut expired_blockhash_retries = 5;
let progress_bar = spinner::new_progress_bar();
progress_bar.set_message("Setting up...");
let mut transactions = messages
.iter()
.enumerate()
.map(|(i, message)| (i, Transaction::new_unsigned(message.clone())))
.collect::<Vec<_>>();
let total_transactions = transactions.len();
let mut transaction_errors = vec![None; transactions.len()];
let mut confirmed_transactions = 0;
let mut block_height = self.rpc_client.get_block_height().await?;
while expired_blockhash_retries > 0 {
let (blockhash, last_valid_block_height) = self
.rpc_client
.get_latest_blockhash_with_commitment(self.rpc_client.commitment())
.await?;
let mut pending_transactions = HashMap::new();
for (i, mut transaction) in transactions {
transaction.try_sign(signers, blockhash)?;
pending_transactions.insert(transaction.signatures[0], (i, transaction));
}
let mut last_resend = Instant::now() - TRANSACTION_RESEND_INTERVAL;
while block_height <= last_valid_block_height {
let num_transactions = pending_transactions.len();
// Periodically re-send all pending transactions
if Instant::now().duration_since(last_resend) > TRANSACTION_RESEND_INTERVAL {
for (index, (_i, transaction)) in pending_transactions.values().enumerate() {
if !self.send_transaction(transaction).await {
let _result = self.rpc_client.send_transaction(transaction).await.ok();
}
set_message_for_confirmed_transactions(
&progress_bar,
confirmed_transactions,
total_transactions,
None, //block_height,
last_valid_block_height,
&format!("Sending {}/{} transactions", index + 1, num_transactions,),
);
sleep(SEND_TRANSACTION_INTERVAL).await;
}
last_resend = Instant::now();
}
// Wait for the next block before checking for transaction statuses
let mut block_height_refreshes = 10;
set_message_for_confirmed_transactions(
&progress_bar,
confirmed_transactions,
total_transactions,
Some(block_height),
last_valid_block_height,
&format!(
"Waiting for next block, {} transactions pending...",
num_transactions
),
);
let mut new_block_height = block_height;
while block_height == new_block_height && block_height_refreshes > 0 {
sleep(Duration::from_millis(500)).await;
new_block_height = self.rpc_client.get_block_height().await?;
block_height_refreshes -= 1;
}
block_height = new_block_height;
// Collect statuses for the transactions, drop those that are confirmed
let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>();
for pending_signatures_chunk in
pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS)
{
if let Ok(result) = self
.rpc_client
.get_signature_statuses(pending_signatures_chunk)
.await
{
let statuses = result.value;
for (signature, status) in
pending_signatures_chunk.iter().zip(statuses.into_iter())
{
if let Some(status) = status {
if status.satisfies_commitment(self.rpc_client.commitment()) {
if let Some((i, _)) = pending_transactions.remove(signature) {
confirmed_transactions += 1;
if status.err.is_some() {
progress_bar.println(format!(
"Failed transaction: {:?}",
status
));
}
transaction_errors[i] = status.err;
}
}
}
}
}
set_message_for_confirmed_transactions(
&progress_bar,
confirmed_transactions,
total_transactions,
Some(block_height),
last_valid_block_height,
"Checking transaction status...",
);
}
if pending_transactions.is_empty() {
return Ok(transaction_errors);
}
}
transactions = pending_transactions.into_values().collect();
progress_bar.println(format!(
"Blockhash expired. {} retries remaining",
expired_blockhash_retries
));
expired_blockhash_retries -= 1;
}
Err(TpuSenderError::Custom("Max retries exceeded".into()))
}
pub fn rpc_client(&self) -> &RpcClient {
&self.rpc_client
}
pub async fn shutdown(&mut self) {
self.exit.store(true, Ordering::Relaxed);
self.leader_tpu_service.join().await;
}
}
impl Drop for TpuClient {
fn drop(&mut self) {
self.exit.store(true, Ordering::Relaxed);
}
}
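A short usage sketch for the nonblocking client above, with placeholder endpoints for a local test validator:

use solana_client::{nonblocking::tpu_client::TpuClient, tpu_client::TpuClientConfig};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
use std::sync::Arc;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoints; a signed `transaction` would then fan out to the
    // current and upcoming leaders via tpu_client.try_send_transaction(&transaction).await?;
    let rpc = Arc::new(RpcClient::new("http://127.0.0.1:8899".to_string()));
    let _tpu_client =
        TpuClient::new(rpc, "ws://127.0.0.1:8900", TpuClientConfig::default()).await?;
    Ok(())
}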


@@ -0,0 +1,42 @@
//! Trait defining async send functions, to be used for UDP or QUIC sending
use {
async_trait::async_trait,
enum_dispatch::enum_dispatch,
solana_quic_client::nonblocking::quic_client::QuicTpuConnection,
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
solana_udp_client::nonblocking::udp_client::UdpTpuConnection,
std::net::SocketAddr,
};
// Due to the existence of `crate::connection_cache::Connection`, if this is named
// `Connection`, enum_dispatch gets confused between the two and throws errors when
// trying to convert later.
#[enum_dispatch]
pub enum NonblockingConnection {
QuicTpuConnection,
UdpTpuConnection,
}
#[async_trait]
#[enum_dispatch(NonblockingConnection)]
pub trait TpuConnection {
fn tpu_addr(&self) -> &SocketAddr;
async fn serialize_and_send_transaction(
&self,
transaction: &VersionedTransaction,
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(&wire_transaction).await
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync;
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync;
}
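Because the trait has generic methods it cannot be boxed as a trait object, so generic helpers are the natural way to stay transport-agnostic; a hypothetical sketch, assuming it sits in this same module (TpuConnection and TransportResult are already in scope above):

// Hypothetical helper: any QUIC or UDP connection variant can serve as a
// sink for an already-serialized transaction.
async fn forward_raw<C: TpuConnection>(conn: &C, wire_tx: &[u8]) -> TransportResult<()> {
    conn.send_wire_transaction(wire_tx).await
}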


@@ -0,0 +1,35 @@
//! Simple UDP client that communicates with the given UDP port and provides
//! an interface for sending transactions
pub use solana_udp_client::nonblocking::udp_client::UdpTpuConnection;
use {
crate::nonblocking::tpu_connection::TpuConnection, async_trait::async_trait,
core::iter::repeat, solana_sdk::transport::Result as TransportResult,
solana_streamer::nonblocking::sendmmsg::batch_send, std::net::SocketAddr,
};
#[async_trait]
impl TpuConnection for UdpTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
self.socket
.send_to(wire_transaction.as_ref(), self.addr)
.await?;
Ok(())
}
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let pkts: Vec<_> = buffers.iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts).await?;
Ok(())
}
}

client/src/quic_client.rs

@@ -0,0 +1,44 @@
//! Simple client that connects to a given UDP port with the QUIC protocol and provides
//! an interface for sending transactions which is restricted by the server's flow control.
pub use solana_quic_client::quic_client::QuicTpuConnection;
use {
crate::{
nonblocking::tpu_connection::TpuConnection as NonblockingTpuConnection,
tpu_connection::TpuConnection,
},
solana_quic_client::quic_client::temporary_pub::*,
solana_sdk::transport::Result as TransportResult,
std::net::SocketAddr,
};
impl TpuConnection for QuicTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
self.inner.tpu_addr()
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
RUNTIME.block_on(self.inner.send_wire_transaction_batch(buffers))?;
Ok(())
}
fn send_wire_transaction_async(&self, wire_transaction: Vec<u8>) -> TransportResult<()> {
let _lock = ASYNC_TASK_SEMAPHORE.acquire();
let inner = self.inner.clone();
let _ = RUNTIME
.spawn(async move { send_wire_transaction_async(inner, wire_transaction).await });
Ok(())
}
fn send_wire_transaction_batch_async(&self, buffers: Vec<Vec<u8>>) -> TransportResult<()> {
let _lock = ASYNC_TASK_SEMAPHORE.acquire();
let inner = self.inner.clone();
let _ =
RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await });
Ok(())
}
}
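The blocking impl above bridges into async code through shared statics from temporary_pub (RUNTIME, ASYNC_TASK_SEMAPHORE). A hypothetical equivalent of the runtime half of that pattern, only to illustrate how synchronous callers drive the async QUIC client (the real definitions live in solana-quic-client):

use lazy_static::lazy_static;
use tokio::runtime::Runtime;

lazy_static! {
    // One shared multi-threaded runtime lets blocking callers block_on()
    // or spawn() async sends without owning a runtime themselves.
    static ref RUNTIME: Runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .thread_name("quic-client")
        .build()
        .expect("could not build tokio runtime");
}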

client/src/thin_client.rs

@@ -0,0 +1,546 @@
//! The `thin_client` module provides a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.
use {
crate::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
log::*,
solana_rpc_client::rpc_client::RpcClient,
solana_rpc_client_api::{config::RpcProgramAccountsConfig, response::Response},
solana_sdk::{
account::Account,
client::{AsyncClient, Client, SyncClient},
clock::{Slot, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
instruction::Instruction,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
signers::Signers,
system_instruction,
timing::duration_as_ms,
transaction::{self, Transaction, VersionedTransaction},
transport::Result as TransportResult,
},
solana_thin_client::thin_client::temporary_pub::*,
std::{
io,
net::SocketAddr,
sync::Arc,
time::{Duration, Instant},
},
};
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
rpc_clients: Vec<RpcClient>,
tpu_addrs: Vec<SocketAddr>,
optimizer: ClientOptimizer,
connection_cache: Arc<ConnectionCache>,
}
impl ThinClient {
/// Create a new ThinClient that will interface with the Rpc at `rpc_addr` using TCP
/// and the Tpu at `tpu_addr` over a connection obtained from `connection_cache`
/// (QUIC or UDP, per the cache's configuration)
pub fn new(
rpc_addr: SocketAddr,
tpu_addr: SocketAddr,
connection_cache: Arc<ConnectionCache>,
) -> Self {
Self::new_from_client(RpcClient::new_socket(rpc_addr), tpu_addr, connection_cache)
}
pub fn new_socket_with_timeout(
rpc_addr: SocketAddr,
tpu_addr: SocketAddr,
timeout: Duration,
connection_cache: Arc<ConnectionCache>,
) -> Self {
let rpc_client = RpcClient::new_socket_with_timeout(rpc_addr, timeout);
Self::new_from_client(rpc_client, tpu_addr, connection_cache)
}
fn new_from_client(
rpc_client: RpcClient,
tpu_addr: SocketAddr,
connection_cache: Arc<ConnectionCache>,
) -> Self {
Self {
rpc_clients: vec![rpc_client],
tpu_addrs: vec![tpu_addr],
optimizer: ClientOptimizer::new(0),
connection_cache,
}
}
pub fn new_from_addrs(
rpc_addrs: Vec<SocketAddr>,
tpu_addrs: Vec<SocketAddr>,
connection_cache: Arc<ConnectionCache>,
) -> Self {
assert!(!rpc_addrs.is_empty());
assert_eq!(rpc_addrs.len(), tpu_addrs.len());
let rpc_clients: Vec<_> = rpc_addrs.into_iter().map(RpcClient::new_socket).collect();
let optimizer = ClientOptimizer::new(rpc_clients.len());
Self {
rpc_clients,
tpu_addrs,
optimizer,
connection_cache,
}
}
fn tpu_addr(&self) -> &SocketAddr {
&self.tpu_addrs[self.optimizer.best()]
}
pub fn rpc_client(&self) -> &RpcClient {
&self.rpc_clients[self.optimizer.best()]
}
/// Retry sending a signed Transaction to the server for processing.
pub fn retry_transfer_until_confirmed(
&self,
keypair: &Keypair,
transaction: &mut Transaction,
tries: usize,
min_confirmed_blocks: usize,
) -> TransportResult<Signature> {
self.send_and_confirm_transaction(&[keypair], transaction, tries, min_confirmed_blocks)
}
/// Retry sending a signed Transaction with one signing Keypair to the server for processing.
pub fn retry_transfer(
&self,
keypair: &Keypair,
transaction: &mut Transaction,
tries: usize,
) -> TransportResult<Signature> {
self.send_and_confirm_transaction(&[keypair], transaction, tries, 0)
}
pub fn send_and_confirm_transaction<T: Signers>(
&self,
keypairs: &T,
transaction: &mut Transaction,
tries: usize,
pending_confirmations: usize,
) -> TransportResult<Signature> {
for x in 0..tries {
let now = Instant::now();
let mut num_confirmed = 0;
let mut wait_time = MAX_PROCESSING_AGE;
// resend the same transaction until the transaction has no chance of succeeding
let wire_transaction =
bincode::serialize(&transaction).expect("transaction serialization failed");
while now.elapsed().as_secs() < wait_time as u64 {
if num_confirmed == 0 {
let conn = self.connection_cache.get_connection(self.tpu_addr());
// Send the transaction if there has been no confirmation (e.g. the first time)
#[allow(clippy::needless_borrow)]
conn.send_wire_transaction(&wire_transaction)?;
}
if let Ok(confirmed_blocks) = self.poll_for_signature_confirmation(
&transaction.signatures[0],
pending_confirmations,
) {
num_confirmed = confirmed_blocks;
if confirmed_blocks >= pending_confirmations {
return Ok(transaction.signatures[0]);
}
// Since the network has seen the transaction, wait longer to receive
// all pending confirmations. Resending the transaction could result in
// extra transaction fees
wait_time = wait_time.max(
MAX_PROCESSING_AGE * pending_confirmations.saturating_sub(num_confirmed),
);
}
}
info!("{} tries failed transfer to {}", x, self.tpu_addr());
let blockhash = self.get_latest_blockhash()?;
transaction.sign(keypairs, blockhash);
}
Err(io::Error::new(
io::ErrorKind::Other,
format!("retry_transfer failed in {} retries", tries),
)
.into())
}
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
self.poll_get_balance_with_commitment(pubkey, CommitmentConfig::default())
}
pub fn poll_get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> TransportResult<u64> {
self.rpc_client()
.poll_get_balance_with_commitment(pubkey, commitment_config)
.map_err(|e| e.into())
}
pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
self.rpc_client().wait_for_balance_with_commitment(
pubkey,
expected_balance,
CommitmentConfig::default(),
)
}
pub fn get_program_accounts_with_config(
&self,
pubkey: &Pubkey,
config: RpcProgramAccountsConfig,
) -> TransportResult<Vec<(Pubkey, Account)>> {
self.rpc_client()
.get_program_accounts_with_config(pubkey, config)
.map_err(|e| e.into())
}
pub fn wait_for_balance_with_commitment(
&self,
pubkey: &Pubkey,
expected_balance: Option<u64>,
commitment_config: CommitmentConfig,
) -> Option<u64> {
self.rpc_client().wait_for_balance_with_commitment(
pubkey,
expected_balance,
commitment_config,
)
}
pub fn poll_for_signature_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> TransportResult<()> {
self.rpc_client()
.poll_for_signature_with_commitment(signature, commitment_config)
.map_err(|e| e.into())
}
pub fn get_num_blocks_since_signature_confirmation(
&mut self,
sig: &Signature,
) -> TransportResult<usize> {
self.rpc_client()
.get_num_blocks_since_signature_confirmation(sig)
.map_err(|e| e.into())
}
}
impl Client for ThinClient {
fn tpu_addr(&self) -> String {
self.tpu_addr().to_string()
}
}
impl SyncClient for ThinClient {
fn send_and_confirm_message<T: Signers>(
&self,
keypairs: &T,
message: Message,
) -> TransportResult<Signature> {
let blockhash = self.get_latest_blockhash()?;
let mut transaction = Transaction::new(keypairs, message, blockhash);
let signature = self.send_and_confirm_transaction(keypairs, &mut transaction, 5, 0)?;
Ok(signature)
}
fn send_and_confirm_instruction(
&self,
keypair: &Keypair,
instruction: Instruction,
) -> TransportResult<Signature> {
let message = Message::new(&[instruction], Some(&keypair.pubkey()));
self.send_and_confirm_message(&[keypair], message)
}
fn transfer_and_confirm(
&self,
lamports: u64,
keypair: &Keypair,
pubkey: &Pubkey,
) -> TransportResult<Signature> {
let transfer_instruction =
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
self.send_and_confirm_instruction(keypair, transfer_instruction)
}
fn get_account_data(&self, pubkey: &Pubkey) -> TransportResult<Option<Vec<u8>>> {
Ok(self.rpc_client().get_account_data(pubkey).ok())
}
fn get_account(&self, pubkey: &Pubkey) -> TransportResult<Option<Account>> {
let account = self.rpc_client().get_account(pubkey);
match account {
Ok(value) => Ok(Some(value)),
Err(_) => Ok(None),
}
}
fn get_account_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> TransportResult<Option<Account>> {
self.rpc_client()
.get_account_with_commitment(pubkey, commitment_config)
.map_err(|e| e.into())
.map(|r| r.value)
}
fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
self.rpc_client().get_balance(pubkey).map_err(|e| e.into())
}
fn get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> TransportResult<u64> {
self.rpc_client()
.get_balance_with_commitment(pubkey, commitment_config)
.map_err(|e| e.into())
.map(|r| r.value)
}
fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> TransportResult<u64> {
self.rpc_client()
.get_minimum_balance_for_rent_exemption(data_len)
.map_err(|e| e.into())
}
fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
#[allow(deprecated)]
let (blockhash, fee_calculator, _last_valid_slot) =
self.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
Ok((blockhash, fee_calculator))
}
fn get_recent_blockhash_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> TransportResult<(Hash, FeeCalculator, Slot)> {
let index = self.optimizer.experiment();
let now = Instant::now();
#[allow(deprecated)]
let recent_blockhash =
self.rpc_clients[index].get_recent_blockhash_with_commitment(commitment_config);
match recent_blockhash {
Ok(Response { value, .. }) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok((value.0, value.1, value.2))
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);
Err(e.into())
}
}
}
fn get_fee_calculator_for_blockhash(
&self,
blockhash: &Hash,
) -> TransportResult<Option<FeeCalculator>> {
#[allow(deprecated)]
self.rpc_client()
.get_fee_calculator_for_blockhash(blockhash)
.map_err(|e| e.into())
}
fn get_fee_rate_governor(&self) -> TransportResult<FeeRateGovernor> {
#[allow(deprecated)]
self.rpc_client()
.get_fee_rate_governor()
.map_err(|e| e.into())
.map(|r| r.value)
}
fn get_signature_status(
&self,
signature: &Signature,
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client()
.get_signature_status(signature)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("send_transaction failed with error {:?}", err),
)
})?;
Ok(status)
}
fn get_signature_status_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client()
.get_signature_status_with_commitment(signature, commitment_config)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("send_transaction failed with error {:?}", err),
)
})?;
Ok(status)
}
fn get_slot(&self) -> TransportResult<u64> {
self.get_slot_with_commitment(CommitmentConfig::default())
}
fn get_slot_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> TransportResult<u64> {
let slot = self
.rpc_client()
.get_slot_with_commitment(commitment_config)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("send_transaction failed with error {:?}", err),
)
})?;
Ok(slot)
}
fn get_epoch_info(&self) -> TransportResult<EpochInfo> {
self.rpc_client().get_epoch_info().map_err(|e| e.into())
}
fn get_transaction_count(&self) -> TransportResult<u64> {
let index = self.optimizer.experiment();
let now = Instant::now();
match self.rpc_client().get_transaction_count() {
Ok(transaction_count) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok(transaction_count)
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);
Err(e.into())
}
}
}
fn get_transaction_count_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> TransportResult<u64> {
let index = self.optimizer.experiment();
let now = Instant::now();
match self
.rpc_client()
.get_transaction_count_with_commitment(commitment_config)
{
Ok(transaction_count) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok(transaction_count)
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);
Err(e.into())
}
}
}
/// Poll the server until the signature has been confirmed by at least `min_confirmed_blocks`
fn poll_for_signature_confirmation(
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> TransportResult<usize> {
self.rpc_client()
.poll_for_signature_confirmation(signature, min_confirmed_blocks)
.map_err(|e| e.into())
}
fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> {
self.rpc_client()
.poll_for_signature(signature)
.map_err(|e| e.into())
}
fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> {
#[allow(deprecated)]
self.rpc_client()
.get_new_blockhash(blockhash)
.map_err(|e| e.into())
}
fn get_latest_blockhash(&self) -> TransportResult<Hash> {
let (blockhash, _) =
self.get_latest_blockhash_with_commitment(CommitmentConfig::default())?;
Ok(blockhash)
}
fn get_latest_blockhash_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> TransportResult<(Hash, u64)> {
let index = self.optimizer.experiment();
let now = Instant::now();
match self.rpc_clients[index].get_latest_blockhash_with_commitment(commitment_config) {
Ok((blockhash, last_valid_block_height)) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok((blockhash, last_valid_block_height))
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);
Err(e.into())
}
}
}
fn is_blockhash_valid(
&self,
blockhash: &Hash,
commitment_config: CommitmentConfig,
) -> TransportResult<bool> {
self.rpc_client()
.is_blockhash_valid(blockhash, commitment_config)
.map_err(|e| e.into())
}
fn get_fee_for_message(&self, message: &Message) -> TransportResult<u64> {
self.rpc_client()
.get_fee_for_message(message)
.map_err(|e| e.into())
}
}
impl AsyncClient for ThinClient {
fn async_send_versioned_transaction(
&self,
transaction: VersionedTransaction,
) -> TransportResult<Signature> {
let conn = self.connection_cache.get_connection(self.tpu_addr());
conn.serialize_and_send_transaction(&transaction)?;
Ok(transaction.signatures[0])
}
fn async_send_versioned_transaction_batch(
&self,
batch: Vec<VersionedTransaction>,
) -> TransportResult<()> {
let conn = self.connection_cache.get_connection(self.tpu_addr());
conn.par_serialize_and_send_transaction_batch(&batch[..])?;
Ok(())
}
}
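Construction follows directly from the signatures above; a minimal sketch with placeholder addresses:

use solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient};
use std::{net::SocketAddr, sync::Arc};

fn main() {
    // Placeholder RPC and TPU addresses of a single node.
    let rpc_addr: SocketAddr = "127.0.0.1:8899".parse().unwrap();
    let tpu_addr: SocketAddr = "127.0.0.1:8003".parse().unwrap();
    let connection_cache = Arc::new(ConnectionCache::default());
    // e.g. client.poll_get_balance(&pubkey) or client.transfer_and_confirm(...)
    let _client = ThinClient::new(rpc_addr, tpu_addr, connection_cache);
}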

client/src/tpu_client.rs

@@ -0,0 +1,132 @@
pub use {
crate::nonblocking::tpu_client::TpuSenderError,
solana_tpu_client::tpu_client::{TpuClientConfig, DEFAULT_FANOUT_SLOTS, MAX_FANOUT_SLOTS},
};
use {
crate::{
connection_cache::ConnectionCache,
nonblocking::tpu_client::TpuClient as NonblockingTpuClient,
},
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_rpc_client::rpc_client::RpcClient,
solana_sdk::{
message::Message,
signers::Signers,
transaction::{Transaction, TransactionError},
transport::Result as TransportResult,
},
solana_tpu_client::tpu_client::temporary_pub::Result,
std::{net::UdpSocket, sync::Arc},
};
/// Client which sends transactions directly to the current leader's TPU port over UDP.
/// The client uses RPC to determine the current leader and fetch node contact info
pub struct TpuClient {
_deprecated: UdpSocket, // TpuClient now uses the connection_cache to choose a send_socket
//todo: get rid of this field
rpc_client: Arc<RpcClient>,
tpu_client: Arc<NonblockingTpuClient>,
}
impl TpuClient {
/// Serialize and send transaction to the current and upcoming leader TPUs according to fanout
/// size
pub fn send_transaction(&self, transaction: &Transaction) -> bool {
self.invoke(self.tpu_client.send_transaction(transaction))
}
/// Send a wire transaction to the current and upcoming leader TPUs according to fanout size
pub fn send_wire_transaction(&self, wire_transaction: Vec<u8>) -> bool {
self.invoke(self.tpu_client.send_wire_transaction(wire_transaction))
}
/// Serialize and send transaction to the current and upcoming leader TPUs according to fanout
/// size
/// Returns the last error if all sends fail
pub fn try_send_transaction(&self, transaction: &Transaction) -> TransportResult<()> {
self.invoke(self.tpu_client.try_send_transaction(transaction))
}
/// Serialize and send a batch of transactions to the current and upcoming leader TPUs according
/// to fanout size
/// Returns the last error if all sends fail
pub fn try_send_transaction_batch(&self, transactions: &[Transaction]) -> TransportResult<()> {
let wire_transactions = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
self.invoke(
self.tpu_client
.try_send_wire_transaction_batch(wire_transactions),
)
}
/// Send a wire transaction to the current and upcoming leader TPUs according to fanout size
/// Returns the last error if all sends fail
pub fn try_send_wire_transaction(&self, wire_transaction: Vec<u8>) -> TransportResult<()> {
self.invoke(self.tpu_client.try_send_wire_transaction(wire_transaction))
}
/// Create a new client that disconnects when dropped
pub fn new(
rpc_client: Arc<RpcClient>,
websocket_url: &str,
config: TpuClientConfig,
) -> Result<Self> {
let create_tpu_client =
NonblockingTpuClient::new(rpc_client.get_inner_client().clone(), websocket_url, config);
let tpu_client =
tokio::task::block_in_place(|| rpc_client.runtime().block_on(create_tpu_client))?;
Ok(Self {
_deprecated: UdpSocket::bind("0.0.0.0:0").unwrap(),
rpc_client,
tpu_client: Arc::new(tpu_client),
})
}
/// Create a new client that disconnects when dropped
pub fn new_with_connection_cache(
rpc_client: Arc<RpcClient>,
websocket_url: &str,
config: TpuClientConfig,
connection_cache: Arc<ConnectionCache>,
) -> Result<Self> {
let create_tpu_client = NonblockingTpuClient::new_with_connection_cache(
rpc_client.get_inner_client().clone(),
websocket_url,
config,
connection_cache,
);
let tpu_client =
tokio::task::block_in_place(|| rpc_client.runtime().block_on(create_tpu_client))?;
Ok(Self {
_deprecated: UdpSocket::bind("0.0.0.0:0").unwrap(),
rpc_client,
tpu_client: Arc::new(tpu_client),
})
}
pub fn send_and_confirm_messages_with_spinner<T: Signers>(
&self,
messages: &[Message],
signers: &T,
) -> Result<Vec<Option<TransactionError>>> {
self.invoke(
self.tpu_client
.send_and_confirm_messages_with_spinner(messages, signers),
)
}
pub fn rpc_client(&self) -> &RpcClient {
&self.rpc_client
}
fn invoke<T, F: std::future::Future<Output = T>>(&self, f: F) -> T {
// `block_on()` panics if called within an asynchronous execution context, whereas
// `block_in_place()` only panics if called from a current_thread runtime, which is the
// lesser evil.
tokio::task::block_in_place(move || self.rpc_client.runtime().block_on(f))
}
}
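The blocking counterpart owns its runtime through the RpcClient it wraps, so plain synchronous code can use it directly; a sketch with placeholder endpoints:

use solana_client::tpu_client::{TpuClient, TpuClientConfig};
use solana_rpc_client::rpc_client::RpcClient;
use std::sync::Arc;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoints; a signed `transaction` would then go out via
    // tpu_client.try_send_transaction(&transaction)?;
    let rpc_client = Arc::new(RpcClient::new("http://127.0.0.1:8899".to_string()));
    let _tpu_client =
        TpuClient::new(rpc_client, "ws://127.0.0.1:8900", TpuClientConfig::default())?;
    Ok(())
}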


@@ -0,0 +1,56 @@
pub use solana_tpu_client::tpu_connection::ClientStats;
use {
enum_dispatch::enum_dispatch,
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_quic_client::quic_client::QuicTpuConnection,
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
solana_udp_client::udp_client::UdpTpuConnection,
std::net::SocketAddr,
};
#[enum_dispatch]
pub enum BlockingConnection {
UdpTpuConnection,
QuicTpuConnection,
}
#[enum_dispatch(BlockingConnection)]
pub trait TpuConnection {
fn tpu_addr(&self) -> &SocketAddr;
fn serialize_and_send_transaction(
&self,
transaction: &VersionedTransaction,
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(wire_transaction)
}
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
self.send_wire_transaction_batch(&[wire_transaction])
}
fn send_wire_transaction_async(&self, wire_transaction: Vec<u8>) -> TransportResult<()>;
fn par_serialize_and_send_transaction_batch(
&self,
transactions: &[VersionedTransaction],
) -> TransportResult<()> {
let buffers = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
self.send_wire_transaction_batch(&buffers)
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync;
fn send_wire_transaction_batch_async(&self, buffers: Vec<Vec<u8>>) -> TransportResult<()>;
}
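As with the nonblocking trait, generics keep callers transport-agnostic; a hypothetical helper, assuming it sits in this same module (TpuConnection, VersionedTransaction, and TransportResult are already in scope above):

// Hypothetical helper: serialize a batch in parallel and send it over any
// blocking connection variant via the default method above.
fn send_batch<C: TpuConnection>(conn: &C, txs: &[VersionedTransaction]) -> TransportResult<()> {
    conn.par_serialize_and_send_transaction_batch(txs)
}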


@@ -1,39 +1,13 @@
//! Simple TPU client that communicates with the given UDP port and provides
//! an interface for sending transactions
pub use solana_udp_client::udp_client::UdpTpuConnection;
use {
crate::{connection_cache_stats::ConnectionCacheStats, tpu_connection::TpuConnection},
core::iter::repeat,
solana_sdk::transport::Result as TransportResult,
solana_streamer::sendmmsg::batch_send,
std::{
net::{SocketAddr, UdpSocket},
sync::Arc,
},
crate::tpu_connection::TpuConnection, core::iter::repeat,
solana_sdk::transport::Result as TransportResult, solana_streamer::sendmmsg::batch_send,
std::net::SocketAddr,
};
pub struct UdpTpuConnection {
socket: Arc<UdpSocket>,
addr: SocketAddr,
}
impl UdpTpuConnection {
pub fn new_from_addr(local_socket: Arc<UdpSocket>, tpu_addr: SocketAddr) -> Self {
Self {
socket: local_socket,
addr: tpu_addr,
}
}
pub fn new(
local_socket: Arc<UdpSocket>,
tpu_addr: SocketAddr,
_connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
Self::new_from_addr(local_socket, tpu_addr)
}
}
impl TpuConnection for UdpTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
&self.addr


@@ -38,6 +38,7 @@ serde = "1.0.144"
serde_derive = "1.0.103"
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.15.0" }
solana-bloom = { path = "../bloom", version = "=1.15.0" }
solana-client = { path = "../client", version = "=1.15.0" }
solana-entry = { path = "../entry", version = "=1.15.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.15.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.15.0" }


@@ -8,6 +8,7 @@ use {
log::*,
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_client::connection_cache::ConnectionCache,
solana_core::{
banking_stage::{BankingStage, BankingStageStats},
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
@@ -37,7 +38,6 @@ use {
transaction::{Transaction, VersionedTransaction},
},
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::connection_cache::ConnectionCache,
solana_vote_program::{
vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction,
},


@@ -26,6 +26,7 @@ use {
},
histogram::Histogram,
itertools::Itertools,
solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_entry::entry::hash_transactions,
solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
solana_ledger::{
@@ -66,7 +67,6 @@ use {
transport::TransportError,
},
solana_streamer::sendmmsg::batch_send,
solana_tpu_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_transaction_status::{
token_balances::TransactionTokenBalancesSet, TransactionTokenBalance,
},


@@ -13,7 +13,7 @@ use {
solana_streamer::streamer::{
self, PacketBatchReceiver, PacketBatchSender, StreamerReceiveStats,
},
solana_tpu_client::connection_cache::DEFAULT_TPU_ENABLE_UDP,
solana_tpu_client::tpu_connection_cache::DEFAULT_TPU_ENABLE_UDP,
std::{
net::UdpSocket,
sync::{


@@ -16,6 +16,7 @@ use {
staked_nodes_updater_service::StakedNodesUpdaterService,
},
crossbeam_channel::{unbounded, Receiver},
solana_client::connection_cache::ConnectionCache,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender},
solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry},
@@ -34,7 +35,6 @@ use {
quic::{spawn_server, StreamStats, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS},
streamer::StakedNodes,
},
solana_tpu_client::connection_cache::ConnectionCache,
std::{
collections::HashMap,
net::UdpSocket,


@@ -28,6 +28,7 @@ use {
window_service::WindowService,
},
crossbeam_channel::{unbounded, Receiver},
solana_client::connection_cache::ConnectionCache,
solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
@@ -45,7 +46,6 @@ use {
prioritization_fee_cache::PrioritizationFeeCache, vote_sender_types::ReplayVoteSender,
},
solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair},
solana_tpu_client::connection_cache::ConnectionCache,
std::{
collections::HashSet,
net::UdpSocket,

View File

@ -25,6 +25,7 @@ use {
},
crossbeam_channel::{bounded, unbounded, Receiver},
rand::{thread_rng, Rng},
solana_client::connection_cache::ConnectionCache,
solana_entry::poh::compute_hash_time_ns,
solana_geyser_plugin_manager::geyser_plugin_service::GeyserPluginService,
solana_gossip::{
@ -99,7 +100,6 @@ use {
},
solana_send_transaction_service::send_transaction_service,
solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes},
solana_tpu_client::connection_cache::ConnectionCache,
solana_vote_program::vote_state,
std::{
collections::{HashMap, HashSet},
@ -2153,7 +2153,7 @@ mod tests {
crossbeam_channel::{bounded, RecvTimeoutError},
solana_ledger::{create_new_tmp_ledger, genesis_utils::create_genesis_config_with_leader},
solana_sdk::{genesis_config::create_genesis_config, poh_config::PohConfig},
solana_tpu_client::connection_cache::{
solana_tpu_client::tpu_connection_cache::{
DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC,
},
std::{fs::remove_dir_all, thread, time::Duration},

View File

@ -3,9 +3,9 @@
use {
rand::{thread_rng, Rng},
solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_gossip::cluster_info::ClusterInfo,
solana_poh::poh_recorder::PohRecorder,
solana_tpu_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
std::{
sync::{
atomic::{AtomicBool, Ordering},

View File

@ -18,6 +18,7 @@ log = "0.4.17"
rand = "0.7.0"
serde = "1.0.144"
solana-bench-tps = { path = "../bench-tps", version = "=1.15.0" }
solana-client = { path = "../client", version = "=1.15.0" }
solana-core = { path = "../core", version = "=1.15.0" }
solana-faucet = { path = "../faucet", version = "=1.15.0" }
solana-gossip = { path = "../gossip", version = "=1.15.0" }

View File

@ -45,6 +45,7 @@ use {
log::*,
rand::{thread_rng, Rng},
solana_bench_tps::{bench::generate_and_fund_keypairs, bench_tps_client::BenchTpsClient},
solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_core::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair},
solana_dos::cli::*,
solana_gossip::{
@ -66,10 +67,7 @@ use {
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::{
connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE},
tpu_connection::TpuConnection,
},
solana_tpu_client::tpu_connection_cache::DEFAULT_TPU_CONNECTION_POOL_SIZE,
std::{
net::{SocketAddr, UdpSocket},
process::exit,
@ -786,6 +784,7 @@ fn main() {
pub mod test {
use {
super::*,
solana_client::thin_client::ThinClient,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{
@ -795,7 +794,6 @@ pub mod test {
},
solana_rpc::rpc::JsonRpcConfig,
solana_sdk::timing::timestamp,
solana_thin_client::thin_client::ThinClient,
};
const TEST_SEND_BATCH_SIZE: usize = 1;

View File

@ -29,6 +29,7 @@ serde_bytes = "0.11"
serde_derive = "1.0.103"
solana-bloom = { path = "../bloom", version = "=1.15.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.15.0" }
solana-client = { path = "../client", version = "=1.15.0" }
solana-entry = { path = "../entry", version = "=1.15.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.15.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.15.0" }

View File

@ -4,6 +4,7 @@ use {
crate::{cluster_info::ClusterInfo, contact_info::ContactInfo},
crossbeam_channel::{unbounded, Sender},
rand::{thread_rng, Rng},
solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient},
solana_perf::recycler::Recycler,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
@ -14,8 +15,6 @@ use {
socket::SocketAddrSpace,
streamer::{self, StreamerReceiveStats},
},
solana_thin_client::thin_client::ThinClient,
solana_tpu_client::connection_cache::ConnectionCache,
std::{
collections::HashSet,
net::{SocketAddr, TcpListener, UdpSocket},

View File

@ -16,6 +16,7 @@ itertools = "0.10.5"
log = "0.4.17"
rand = "0.7.0"
rayon = "1.5.3"
solana-client = { path = "../client", version = "=1.15.0" }
solana-config-program = { path = "../programs/config", version = "=1.15.0" }
solana-core = { path = "../core", version = "=1.15.0" }
solana-entry = { path = "../entry", version = "=1.15.0" }

View File

@ -1,9 +1,9 @@
use {
solana_client::thin_client::ThinClient,
solana_core::validator::{Validator, ValidatorConfig},
solana_gossip::{cluster_info::Node, contact_info::ContactInfo},
solana_sdk::{pubkey::Pubkey, signature::Keypair},
solana_streamer::socket::SocketAddrSpace,
solana_thin_client::thin_client::ThinClient,
std::{path::PathBuf, sync::Arc},
};

View File

@ -6,6 +6,7 @@ use log::*;
use {
rand::{thread_rng, Rng},
rayon::prelude::*,
solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient},
solana_core::consensus::VOTE_THRESHOLD_DEPTH,
solana_entry::entry::{Entry, EntrySlice},
solana_gossip::{
@ -31,8 +32,6 @@ use {
transport::TransportError,
},
solana_streamer::socket::SocketAddrSpace,
solana_thin_client::thin_client::ThinClient,
solana_tpu_client::connection_cache::ConnectionCache,
solana_vote_program::vote_transaction,
std::{
collections::{HashMap, HashSet},

View File

@ -6,6 +6,7 @@ use {
},
itertools::izip,
log::*,
solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient},
solana_core::{
tower_storage::FileTowerStorage,
validator::{Validator, ValidatorConfig, ValidatorStartProgress},
@ -41,10 +42,8 @@ use {
},
solana_stake_program::{config::create_account as create_stake_config_account, stake_state},
solana_streamer::socket::SocketAddrSpace,
solana_thin_client::thin_client::ThinClient,
solana_tpu_client::connection_cache::{
ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP,
DEFAULT_TPU_USE_QUIC,
solana_tpu_client::tpu_connection_cache::{
DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC,
},
solana_vote_program::{
vote_instruction,

View File

@ -6,6 +6,7 @@ use {
gag::BufferRedirect,
log::*,
serial_test::serial,
solana_client::thin_client::ThinClient,
solana_core::{
broadcast_stage::BroadcastStageType,
consensus::{Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH},
@ -53,7 +54,6 @@ use {
system_program, system_transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_thin_client::thin_client::ThinClient,
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
std::{
collections::{HashMap, HashSet},

View File

@ -4145,7 +4145,6 @@ dependencies = [
"solana-runtime",
"solana-sdk 1.15.0",
"solana-send-transaction-service",
"solana-tpu-client",
"tarpc",
"tokio",
"tokio-serde",
@ -4257,15 +4256,31 @@ dependencies = [
name = "solana-client"
version = "1.15.0"
dependencies = [
"async-trait",
"bincode",
"enum_dispatch",
"futures 0.3.24",
"futures-util",
"indexmap",
"indicatif",
"log",
"rand 0.7.3",
"rayon",
"solana-measure",
"solana-metrics",
"solana-net-utils",
"solana-pubsub-client",
"solana-quic-client",
"solana-rpc-client",
"solana-rpc-client-api",
"solana-rpc-client-nonce-utils",
"solana-sdk 1.15.0",
"solana-streamer",
"solana-thin-client",
"solana-tpu-client",
"solana-udp-client",
"thiserror",
"tokio",
]
[[package]]
@ -4317,6 +4332,7 @@ dependencies = [
"serde_derive",
"solana-address-lookup-table-program",
"solana-bloom",
"solana-client",
"solana-entry",
"solana-frozen-abi 1.15.0",
"solana-frozen-abi-macro 1.15.0",
@ -4554,6 +4570,7 @@ dependencies = [
"serde_derive",
"solana-bloom",
"solana-clap-utils",
"solana-client",
"solana-entry",
"solana-frozen-abi 1.15.0",
"solana-frozen-abi-macro 1.15.0",
@ -4913,6 +4930,31 @@ dependencies = [
"url 2.2.2",
]
[[package]]
name = "solana-quic-client"
version = "1.15.0"
dependencies = [
"async-mutex",
"async-trait",
"futures 0.3.24",
"itertools",
"lazy_static",
"log",
"quinn",
"quinn-proto",
"quinn-udp",
"rustls 0.20.6",
"solana-measure",
"solana-metrics",
"solana-net-utils",
"solana-rpc-client-api",
"solana-sdk 1.15.0",
"solana-streamer",
"solana-tpu-client",
"thiserror",
"tokio",
]
[[package]]
name = "solana-rayon-threadlimit"
version = "1.15.0"
@ -4962,6 +5004,7 @@ dependencies = [
"serde_json",
"soketto",
"solana-account-decoder",
"solana-client",
"solana-entry",
"solana-faucet",
"solana-gossip",
@ -5623,6 +5666,7 @@ version = "1.15.0"
dependencies = [
"crossbeam-channel",
"log",
"solana-client",
"solana-measure",
"solana-metrics",
"solana-runtime",
@ -5776,23 +5820,14 @@ dependencies = [
name = "solana-tpu-client"
version = "1.15.0"
dependencies = [
"async-mutex",
"async-trait",
"bincode",
"enum_dispatch",
"futures 0.3.24",
"futures-util",
"indexmap",
"indicatif",
"itertools",
"lazy_static",
"log",
"quinn",
"quinn-proto",
"quinn-udp",
"rand 0.7.3",
"rayon",
"rustls 0.20.6",
"solana-measure",
"solana-metrics",
"solana-net-utils",
@ -5800,7 +5835,6 @@ dependencies = [
"solana-rpc-client",
"solana-rpc-client-api",
"solana-sdk 1.15.0",
"solana-streamer",
"thiserror",
"tokio",
]
@ -5829,6 +5863,19 @@ dependencies = [
"thiserror",
]
[[package]]
name = "solana-udp-client"
version = "1.15.0"
dependencies = [
"async-trait",
"solana-net-utils",
"solana-sdk 1.15.0",
"solana-streamer",
"solana-tpu-client",
"thiserror",
"tokio",
]
[[package]]
name = "solana-validator"
version = "1.15.0"

View File

@ -10,11 +10,27 @@ documentation = "https://docs.rs/solana-quic-client"
edition = "2021"
[dependencies]
async-mutex = "1.4.0"
async-trait = "0.1.57"
futures = "0.3"
itertools = "0.10.5"
lazy_static = "1.4.0"
log = "0.4.17"
quinn = "0.8.4"
quinn-proto = "0.8.4"
quinn-udp = "0.1.3"
rustls = { version = "0.20.6", features = ["dangerous_configuration"] }
solana-measure = { path = "../measure", version = "=1.15.0" }
solana-metrics = { path = "../metrics", version = "=1.15.0" }
solana-net-utils = { path = "../net-utils", version = "=1.15.0" }
solana-rpc-client-api = { path = "../rpc-client-api", version = "=1.15.0" }
solana-sdk = { path = "../sdk", version = "=1.15.0" }
solana-streamer = { path = "../streamer", version = "=1.15.0" }
solana-tpu-client = { path = "../tpu-client", version = "=1.15.0" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
crossbeam-channel = "0.5"
solana-logger = { path = "../logger", version = "=1.15.0" }
solana-perf = { path = "../perf", version = "=1.15.0" }

View File

@ -3,6 +3,9 @@
pub mod nonblocking;
pub mod quic_client;
#[macro_use]
extern crate solana_metrics;
use {
crate::{
nonblocking::quic_client::{

View File

@ -1,5 +1,598 @@
//! Simple nonblocking client that connects to a given UDP port with the QUIC protocol
//! and provides an interface for sending transactions that is subject to the
//! server's flow control.
use {
async_mutex::Mutex,
async_trait::async_trait,
futures::future::join_all,
itertools::Itertools,
log::*,
quinn::{
ClientConfig, ConnectError, ConnectionError, Endpoint, EndpointConfig, IdleTimeout,
NewConnection, VarInt, WriteError,
},
solana_measure::measure::Measure,
solana_net_utils::VALIDATOR_PORT_RANGE,
solana_rpc_client_api::client_error::ErrorKind as ClientErrorKind,
solana_sdk::{
quic::{
QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS, QUIC_KEEP_ALIVE_MS, QUIC_MAX_TIMEOUT_MS,
QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS,
},
signature::Keypair,
transport::Result as TransportResult,
},
solana_streamer::{
nonblocking::quic::ALPN_TPU_PROTOCOL_ID,
tls_certificates::new_self_signed_tls_certificate_chain,
},
solana_tpu_client::{
connection_cache_stats::ConnectionCacheStats, nonblocking::tpu_connection::TpuConnection,
tpu_connection::ClientStats,
},
std::{
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{atomic::Ordering, Arc},
thread,
time::Duration,
},
thiserror::Error,
tokio::{sync::RwLock, time::timeout},
};
pub use solana_tpu_client::nonblocking::quic_client::*;
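// Accepts any server certificate. TPU servers present self-signed
// certificates, so the client relies on rustls' `dangerous_configuration`
// feature to skip verification entirely.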
struct SkipServerVerification;
impl SkipServerVerification {
pub fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
pub struct QuicClientCertificate {
pub certificates: Vec<rustls::Certificate>,
pub key: rustls::PrivateKey,
}
/// A lazily initialized QUIC Endpoint
pub struct QuicLazyInitializedEndpoint {
endpoint: RwLock<Option<Arc<Endpoint>>>,
client_certificate: Arc<QuicClientCertificate>,
}
#[derive(Error, Debug)]
pub enum QuicError {
#[error(transparent)]
WriteError(#[from] WriteError),
#[error(transparent)]
ConnectionError(#[from] ConnectionError),
#[error(transparent)]
ConnectError(#[from] ConnectError),
}
impl From<QuicError> for ClientErrorKind {
fn from(quic_error: QuicError) -> Self {
Self::Custom(format!("{:?}", quic_error))
}
}
impl QuicLazyInitializedEndpoint {
pub fn new(client_certificate: Arc<QuicClientCertificate>) -> Self {
Self {
endpoint: RwLock::new(None),
client_certificate,
}
}
fn create_endpoint(&self) -> Endpoint {
let (_, client_socket) = solana_net_utils::bind_in_range(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
VALIDATOR_PORT_RANGE,
)
.expect("QuicLazyInitializedEndpoint::create_endpoint bind_in_range");
let mut crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_single_cert(
self.client_certificate.certificates.clone(),
self.client_certificate.key.clone(),
)
.expect("Failed to set QUIC client certificates");
crypto.enable_early_data = true;
crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()];
let mut endpoint =
QuicNewConnection::create_endpoint(EndpointConfig::default(), client_socket);
let mut config = ClientConfig::new(Arc::new(crypto));
let transport_config = Arc::get_mut(&mut config.transport)
.expect("QuicLazyInitializedEndpoint::create_endpoint Arc::get_mut");
let timeout = IdleTimeout::from(VarInt::from_u32(QUIC_MAX_TIMEOUT_MS));
transport_config.max_idle_timeout(Some(timeout));
transport_config.keep_alive_interval(Some(Duration::from_millis(QUIC_KEEP_ALIVE_MS)));
endpoint.set_default_client_config(config);
endpoint
}
async fn get_endpoint(&self) -> Arc<Endpoint> {
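// Double-checked locking: take the read lock for the common case where the
// endpoint already exists, then re-check under the write lock in case another
// task initialized it while we waited.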
let lock = self.endpoint.read().await;
let endpoint = lock.as_ref();
match endpoint {
Some(endpoint) => endpoint.clone(),
None => {
drop(lock);
let mut lock = self.endpoint.write().await;
let endpoint = lock.as_ref();
match endpoint {
Some(endpoint) => endpoint.clone(),
None => {
let connection = Arc::new(self.create_endpoint());
*lock = Some(connection.clone());
connection
}
}
}
}
}
}
impl Default for QuicLazyInitializedEndpoint {
fn default() -> Self {
let (certs, priv_key) = new_self_signed_tls_certificate_chain(
&Keypair::new(),
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
)
.expect("Failed to create QUIC client certificate");
Self::new(Arc::new(QuicClientCertificate {
certificates: certs,
key: priv_key,
}))
}
}
/// A wrapper over NewConnection with the additional capability to create the
/// endpoint as part of creating a new connection.
#[derive(Clone)]
struct QuicNewConnection {
endpoint: Arc<Endpoint>,
connection: Arc<NewConnection>,
}
impl QuicNewConnection {
/// Create a QuicNewConnection given the remote address 'addr'.
async fn make_connection(
endpoint: Arc<QuicLazyInitializedEndpoint>,
addr: SocketAddr,
stats: &ClientStats,
) -> Result<Self, QuicError> {
let mut make_connection_measure = Measure::start("make_connection_measure");
let endpoint = endpoint.get_endpoint().await;
let connecting = endpoint.connect(addr, "connect")?;
stats.total_connections.fetch_add(1, Ordering::Relaxed);
if let Ok(connecting_result) = timeout(
Duration::from_millis(QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS),
connecting,
)
.await
{
if connecting_result.is_err() {
stats.connection_errors.fetch_add(1, Ordering::Relaxed);
}
make_connection_measure.stop();
stats
.make_connection_ms
.fetch_add(make_connection_measure.as_ms(), Ordering::Relaxed);
let connection = connecting_result?;
Ok(Self {
endpoint,
connection: Arc::new(connection),
})
} else {
Err(ConnectionError::TimedOut.into())
}
}
fn create_endpoint(config: EndpointConfig, client_socket: UdpSocket) -> Endpoint {
quinn::Endpoint::new(config, None, client_socket)
.expect("QuicNewConnection::create_endpoint quinn::Endpoint::new")
.0
}
// Attempts to make a faster connection by taking advantage of pre-existing key material.
// Only works if a connection to this endpoint was previously established.
async fn make_connection_0rtt(
&mut self,
addr: SocketAddr,
stats: &ClientStats,
) -> Result<Arc<NewConnection>, QuicError> {
let connecting = self.endpoint.connect(addr, "connect")?;
stats.total_connections.fetch_add(1, Ordering::Relaxed);
let connection = match connecting.into_0rtt() {
Ok((connection, zero_rtt)) => {
if let Ok(zero_rtt) = timeout(
Duration::from_millis(QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS),
zero_rtt,
)
.await
{
if zero_rtt {
stats.zero_rtt_accepts.fetch_add(1, Ordering::Relaxed);
} else {
stats.zero_rtt_rejects.fetch_add(1, Ordering::Relaxed);
}
connection
} else {
return Err(ConnectionError::TimedOut.into());
}
}
Err(connecting) => {
stats.connection_errors.fetch_add(1, Ordering::Relaxed);
if let Ok(connecting_result) = timeout(
Duration::from_millis(QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS),
connecting,
)
.await
{
connecting_result?
} else {
return Err(ConnectionError::TimedOut.into());
}
}
};
self.connection = Arc::new(connection);
Ok(self.connection.clone())
}
}
pub struct QuicClient {
endpoint: Arc<QuicLazyInitializedEndpoint>,
connection: Arc<Mutex<Option<QuicNewConnection>>>,
addr: SocketAddr,
stats: Arc<ClientStats>,
chunk_size: usize,
}
impl QuicClient {
pub fn new(
endpoint: Arc<QuicLazyInitializedEndpoint>,
addr: SocketAddr,
chunk_size: usize,
) -> Self {
Self {
endpoint,
connection: Arc::new(Mutex::new(None)),
addr,
stats: Arc::new(ClientStats::default()),
chunk_size,
}
}
async fn _send_buffer_using_conn(
data: &[u8],
connection: &NewConnection,
) -> Result<(), QuicError> {
let mut send_stream = connection.connection.open_uni().await?;
send_stream.write_all(data).await?;
send_stream.finish().await?;
Ok(())
}
// Attempts to send data, connecting/reconnecting as necessary
// On success, returns the connection used to successfully send the data
async fn _send_buffer(
&self,
data: &[u8],
stats: &ClientStats,
connection_stats: Arc<ConnectionCacheStats>,
) -> Result<Arc<NewConnection>, QuicError> {
let mut connection_try_count = 0;
let mut last_connection_id = 0;
let mut last_error = None;
while connection_try_count < 2 {
let connection = {
let mut conn_guard = self.connection.lock().await;
let maybe_conn = conn_guard.as_mut();
match maybe_conn {
Some(conn) => {
if conn.connection.connection.stable_id() == last_connection_id {
// This is the problematic connection we used before; create a new one
let conn = conn.make_connection_0rtt(self.addr, stats).await;
match conn {
Ok(conn) => {
info!(
"Made 0rtt connection to {} with id {} try_count {}, last_connection_id: {}, last_error: {:?}",
self.addr,
conn.connection.stable_id(),
connection_try_count,
last_connection_id,
last_error,
);
connection_try_count += 1;
conn
}
Err(err) => {
info!(
"Cannot make 0rtt connection to {}, error {:}",
self.addr, err
);
return Err(err);
}
}
} else {
stats.connection_reuse.fetch_add(1, Ordering::Relaxed);
conn.connection.clone()
}
}
None => {
let conn = QuicNewConnection::make_connection(
self.endpoint.clone(),
self.addr,
stats,
)
.await;
match conn {
Ok(conn) => {
*conn_guard = Some(conn.clone());
info!(
"Made connection to {} id {} try_count {}",
self.addr,
conn.connection.connection.stable_id(),
connection_try_count
);
connection_try_count += 1;
conn.connection.clone()
}
Err(err) => {
info!("Cannot make connection to {}, error {:}", self.addr, err);
return Err(err);
}
}
}
}
};
let new_stats = connection.connection.stats();
connection_stats
.total_client_stats
.congestion_events
.update_stat(
&self.stats.congestion_events,
new_stats.path.congestion_events,
);
connection_stats
.total_client_stats
.tx_streams_blocked_uni
.update_stat(
&self.stats.tx_streams_blocked_uni,
new_stats.frame_tx.streams_blocked_uni,
);
connection_stats
.total_client_stats
.tx_data_blocked
.update_stat(&self.stats.tx_data_blocked, new_stats.frame_tx.data_blocked);
connection_stats
.total_client_stats
.tx_acks
.update_stat(&self.stats.tx_acks, new_stats.frame_tx.acks);
last_connection_id = connection.connection.stable_id();
match Self::_send_buffer_using_conn(data, &connection).await {
Ok(()) => {
return Ok(connection);
}
Err(err) => match err {
QuicError::ConnectionError(_) => {
last_error = Some(err);
}
_ => {
info!(
"Error sending to {} with id {}, error {:?} thread: {:?}",
self.addr,
connection.connection.stable_id(),
err,
thread::current().id(),
);
return Err(err);
}
},
}
}
// If we reach this point, we have exhausted the maximum retries; return the last error
info!(
"Ran into an error sending transactions {:?}, exhausted retries to {}",
last_error, self.addr
);
// If we get here but last_error is None, then we have a logic error
// in this function, so panic here with an expect to help debugging
Err(last_error.expect("QuicClient::_send_buffer last_error.expect"))
}
pub async fn send_buffer<T>(
&self,
data: T,
stats: &ClientStats,
connection_stats: Arc<ConnectionCacheStats>,
) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
self._send_buffer(data.as_ref(), stats, connection_stats)
.await
.map_err(Into::<ClientErrorKind>::into)?;
Ok(())
}
pub async fn send_batch<T>(
&self,
buffers: &[T],
stats: &ClientStats,
connection_stats: Arc<ConnectionCacheStats>,
) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
// Start by "testing" the connection: send the first transaction. This also
// connects to the server if not already connected, reconnects and retries if
// the first send attempt fails (for example due to a timed-out connection),
// and returns either an error or the connection that successfully sent the
// transaction.
// The returned connection is then used for the rest of the batch, both to
// avoid touching the mutex in self and to skip reconnecting on mid-batch
// failures: testing, even in an ideal GCE environment, found no case where
// reconnecting and retrying in the middle of a batch send (i.e. after a
// connection error, which presumably cannot be due to a timed-out connection)
// succeeded.
if buffers.is_empty() {
return Ok(());
}
let connection = self
._send_buffer(buffers[0].as_ref(), stats, connection_stats)
.await
.map_err(Into::<ClientErrorKind>::into)?;
// Used to avoid dereferencing the Arc multiple times below
// by just getting a reference to the NewConnection once
let connection_ref: &NewConnection = &connection;
let chunks = buffers[1..buffers.len()].iter().chunks(self.chunk_size);
let futures: Vec<_> = chunks
.into_iter()
.map(|buffs| {
join_all(
buffs
.into_iter()
.map(|buf| Self::_send_buffer_using_conn(buf.as_ref(), connection_ref)),
)
})
.collect();
for f in futures {
f.await
.into_iter()
.try_for_each(|res| res)
.map_err(Into::<ClientErrorKind>::into)?;
}
Ok(())
}
pub fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
pub fn stats(&self) -> Arc<ClientStats> {
self.stats.clone()
}
}
pub struct QuicTpuConnection {
pub client: Arc<QuicClient>,
pub connection_stats: Arc<ConnectionCacheStats>,
}
impl QuicTpuConnection {
pub fn base_stats(&self) -> Arc<ClientStats> {
self.client.stats()
}
pub fn connection_stats(&self) -> Arc<ConnectionCacheStats> {
self.connection_stats.clone()
}
pub fn new(
endpoint: Arc<QuicLazyInitializedEndpoint>,
addr: SocketAddr,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
let client = Arc::new(QuicClient::new(
endpoint,
addr,
QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS,
));
Self::new_with_client(client, connection_stats)
}
pub fn new_with_client(
client: Arc<QuicClient>,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
Self {
client,
connection_stats,
}
}
}
#[async_trait]
impl TpuConnection for QuicTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
self.client.tpu_addr()
}
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let stats = ClientStats::default();
let len = buffers.len();
let res = self
.client
.send_batch(buffers, &stats, self.connection_stats.clone())
.await;
self.connection_stats
.add_client_stats(&stats, len, res.is_ok());
res?;
Ok(())
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let stats = Arc::new(ClientStats::default());
let send_buffer =
self.client
.send_buffer(wire_transaction, &stats, self.connection_stats.clone());
if let Err(e) = send_buffer.await {
warn!(
"Failed to send transaction async to {}, error: {:?} ",
self.tpu_addr(),
e
);
datapoint_warn!("send-wire-async", ("failure", 1, i64),);
self.connection_stats.add_client_stats(&stats, 1, false);
} else {
self.connection_stats.add_client_stats(&stats, 1, true);
}
Ok(())
}
}
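The nonblocking connection above is usable on its own, without the blocking wrapper defined in the crate's quic_client module below. A minimal sketch, assuming a reachable TPU at `tpu_addr` and a serialized transaction in `wire_tx` (both hypothetical placeholders):

use {
    solana_quic_client::nonblocking::quic_client::{
        QuicLazyInitializedEndpoint, QuicTpuConnection,
    },
    solana_tpu_client::{
        connection_cache_stats::ConnectionCacheStats,
        nonblocking::tpu_connection::TpuConnection,
    },
    std::{net::SocketAddr, sync::Arc},
};

async fn send_one(tpu_addr: SocketAddr, wire_tx: Vec<u8>) -> solana_sdk::transport::Result<()> {
    // The default endpoint generates a throwaway self-signed client certificate;
    // no socket is bound until the first send touches the lazy endpoint.
    let endpoint = Arc::new(QuicLazyInitializedEndpoint::default());
    let connection =
        QuicTpuConnection::new(endpoint, tpu_addr, Arc::new(ConnectionCacheStats::default()));
    connection.send_wire_transaction(wire_tx).await
}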

View File

@ -1,4 +1,189 @@
//! Simple client that connects to a given UDP port with the QUIC protocol and provides
//! an interface for sending transactions that is subject to the server's flow control.
pub use solana_tpu_client::quic_client::*;
use {
crate::nonblocking::quic_client::{
QuicClient, QuicLazyInitializedEndpoint, QuicTpuConnection as NonblockingQuicTpuConnection,
},
lazy_static::lazy_static,
log::*,
solana_sdk::transport::{Result as TransportResult, TransportError},
solana_tpu_client::{
connection_cache_stats::ConnectionCacheStats,
nonblocking::tpu_connection::TpuConnection as NonblockingTpuConnection,
tpu_connection::{ClientStats, TpuConnection},
},
std::{
net::SocketAddr,
sync::{atomic::Ordering, Arc, Condvar, Mutex, MutexGuard},
time::Duration,
},
tokio::{runtime::Runtime, time::timeout},
};
pub mod temporary_pub {
use super::*;
pub const MAX_OUTSTANDING_TASK: u64 = 2000;
pub const SEND_TRANSACTION_TIMEOUT_MS: u64 = 10000;
/// A semaphore used to limit the number of asynchronous tasks spawned onto the
/// runtime. Before spawning a task, call acquire. After the task is done (be it
/// success or failure), call release.
pub struct AsyncTaskSemaphore {
/// Keep the counter info about the usage
counter: Mutex<u64>,
/// Conditional variable for signaling when counter is decremented
cond_var: Condvar,
/// The maximum usage allowed by this semaphore.
permits: u64,
}
impl AsyncTaskSemaphore {
pub fn new(permits: u64) -> Self {
Self {
counter: Mutex::new(0),
cond_var: Condvar::new(),
permits,
}
}
/// When this returns, the counter lock is held and the usage count has been
/// incremented. Dropping the returned MutexGuard releases the lock without
/// decrementing the usage count.
pub fn acquire(&self) -> MutexGuard<u64> {
let mut count = self.counter.lock().unwrap();
*count += 1;
while *count > self.permits {
count = self.cond_var.wait(count).unwrap();
}
count
}
/// Acquire the lock and decrement the usage count
pub fn release(&self) {
let mut count = self.counter.lock().unwrap();
*count -= 1;
self.cond_var.notify_one();
}
}
lazy_static! {
pub static ref ASYNC_TASK_SEMAPHORE: AsyncTaskSemaphore =
AsyncTaskSemaphore::new(MAX_OUTSTANDING_TASK);
pub static ref RUNTIME: Runtime = tokio::runtime::Builder::new_multi_thread()
.thread_name("quic-client")
.enable_all()
.build()
.unwrap();
}
pub async fn send_wire_transaction_async(
connection: Arc<NonblockingQuicTpuConnection>,
wire_transaction: Vec<u8>,
) -> TransportResult<()> {
let result = timeout(
Duration::from_millis(SEND_TRANSACTION_TIMEOUT_MS),
connection.send_wire_transaction(wire_transaction),
)
.await;
ASYNC_TASK_SEMAPHORE.release();
handle_send_result(result, connection)
}
pub async fn send_wire_transaction_batch_async(
connection: Arc<NonblockingQuicTpuConnection>,
buffers: Vec<Vec<u8>>,
) -> TransportResult<()> {
let time_out = SEND_TRANSACTION_TIMEOUT_MS * buffers.len() as u64;
let result = timeout(
Duration::from_millis(time_out),
connection.send_wire_transaction_batch(&buffers),
)
.await;
ASYNC_TASK_SEMAPHORE.release();
handle_send_result(result, connection)
}
/// Check the send result and update stats if the send timed out. Returns the checked result.
pub fn handle_send_result(
result: Result<Result<(), TransportError>, tokio::time::error::Elapsed>,
connection: Arc<NonblockingQuicTpuConnection>,
) -> Result<(), TransportError> {
match result {
Ok(result) => result,
Err(_err) => {
let client_stats = ClientStats::default();
client_stats.send_timeout.fetch_add(1, Ordering::Relaxed);
let stats = connection.connection_stats();
stats.add_client_stats(&client_stats, 0, false);
info!("Timedout sending transaction {:?}", connection.tpu_addr());
Err(TransportError::Custom(
"Timedout sending transaction".to_string(),
))
}
}
}
}
use temporary_pub::*;
pub struct QuicTpuConnection {
pub inner: Arc<NonblockingQuicTpuConnection>,
}
impl QuicTpuConnection {
pub fn new(
endpoint: Arc<QuicLazyInitializedEndpoint>,
tpu_addr: SocketAddr,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
let inner = Arc::new(NonblockingQuicTpuConnection::new(
endpoint,
tpu_addr,
connection_stats,
));
Self { inner }
}
pub fn new_with_client(
client: Arc<QuicClient>,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
let inner = Arc::new(NonblockingQuicTpuConnection::new_with_client(
client,
connection_stats,
));
Self { inner }
}
}
impl TpuConnection for QuicTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
self.inner.tpu_addr()
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
RUNTIME.block_on(self.inner.send_wire_transaction_batch(buffers))?;
Ok(())
}
fn send_wire_transaction_async(&self, wire_transaction: Vec<u8>) -> TransportResult<()> {
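// Take a semaphore permit before spawning; the spawned task releases it via
// ASYNC_TASK_SEMAPHORE.release() once the send completes or times out.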
let _lock = ASYNC_TASK_SEMAPHORE.acquire();
let inner = self.inner.clone();
let _ = RUNTIME
.spawn(async move { send_wire_transaction_async(inner, wire_transaction).await });
Ok(())
}
fn send_wire_transaction_batch_async(&self, buffers: Vec<Vec<u8>>) -> TransportResult<()> {
let _lock = ASYNC_TASK_SEMAPHORE.acquire();
let inner = self.inner.clone();
let _ =
RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await });
Ok(())
}
}
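Taken together, the wrapper drives the nonblocking client on the shared lazy-static RUNTIME, while the Condvar-based semaphore applies backpressure to synchronous callers without tying up runtime worker threads. A minimal sketch of the synchronous path, assuming a reachable TPU at `tpu_addr` (hypothetical placeholder):

use {
    solana_quic_client::{
        nonblocking::quic_client::QuicLazyInitializedEndpoint, quic_client::QuicTpuConnection,
    },
    solana_tpu_client::{
        connection_cache_stats::ConnectionCacheStats, tpu_connection::TpuConnection,
    },
    std::{net::SocketAddr, sync::Arc},
};

fn send_blocking(tpu_addr: SocketAddr, wire_tx: Vec<u8>) -> solana_sdk::transport::Result<()> {
    let connection = QuicTpuConnection::new(
        Arc::new(QuicLazyInitializedEndpoint::default()),
        tpu_addr,
        Arc::new(ConnectionCacheStats::default()),
    );
    // Queues the send on the shared runtime and returns immediately.
    connection.send_wire_transaction_async(wire_tx)
}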

View File

@ -3,12 +3,10 @@ mod tests {
use {
crossbeam_channel::{unbounded, Receiver},
solana_perf::packet::PacketBatch,
solana_quic_client::nonblocking::quic_client::QuicLazyInitializedEndpoint,
solana_sdk::{packet::PACKET_DATA_SIZE, signature::Keypair},
solana_streamer::{quic::StreamStats, streamer::StakedNodes},
solana_tpu_client::{
connection_cache_stats::ConnectionCacheStats,
nonblocking::quic_client::QuicLazyInitializedEndpoint,
},
solana_tpu_client::connection_cache_stats::ConnectionCacheStats,
std::{
net::{IpAddr, SocketAddr, UdpSocket},
sync::{
@ -62,7 +60,10 @@ mod tests {
#[test]
fn test_quic_client_multiple_writes() {
use solana_tpu_client::{quic_client::QuicTpuConnection, tpu_connection::TpuConnection};
use {
solana_quic_client::quic_client::QuicTpuConnection,
solana_tpu_client::tpu_connection::TpuConnection,
};
solana_logger::setup();
let (sender, receiver) = unbounded();
let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
@ -106,8 +107,9 @@ mod tests {
#[tokio::test]
async fn test_nonblocking_quic_client_multiple_writes() {
use solana_tpu_client::nonblocking::{
quic_client::QuicTpuConnection, tpu_connection::TpuConnection,
use {
solana_quic_client::nonblocking::quic_client::QuicTpuConnection,
solana_tpu_client::nonblocking::tpu_connection::TpuConnection,
};
solana_logger::setup();
let (sender, receiver) = unbounded();

View File

@ -20,6 +20,7 @@ reqwest = { version = "0.11.12", default-features = false, features = ["blocking
serde = "1.0.144"
serde_json = "1.0.83"
solana-account-decoder = { path = "../account-decoder", version = "=1.15.0" }
solana-client = { path = "../client", version = "=1.15.0" }
solana-pubsub-client = { path = "../pubsub-client", version = "=1.15.0" }
solana-rpc = { path = "../rpc", version = "=1.15.0" }
solana-rpc-client = { path = "../rpc-client", version = "=1.15.0", default-features = false }

View File

@ -1,10 +1,10 @@
use {
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, pubkey::Pubkey, system_transaction},
solana_test_validator::TestValidatorGenesis,
solana_tpu_client::{
solana_client::{
nonblocking::tpu_client::{LeaderTpuService, TpuClient},
tpu_client::TpuClientConfig,
},
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, pubkey::Pubkey, system_transaction},
solana_test_validator::TestValidatorGenesis,
std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
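The corresponding nonblocking flow, sketched under the assumption that the 1.15 async constructors mirror their blocking counterparts (URLs are placeholders):

use {
    solana_client::{nonblocking::tpu_client::TpuClient, tpu_client::TpuClientConfig},
    solana_rpc_client::nonblocking::rpc_client::RpcClient,
    solana_sdk::transaction::Transaction,
    std::sync::Arc,
};

async fn send(transaction: &Transaction) {
    let rpc_client = Arc::new(RpcClient::new("http://127.0.0.1:8899".to_string()));
    let tpu_client = TpuClient::new(rpc_client, "ws://127.0.0.1:8900", TpuClientConfig::default())
        .await
        .expect("tpu client");
    // Returns false if the wire send failed; it does not wait for confirmation.
    tpu_client.send_transaction(transaction).await;
}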

View File

@ -6,6 +6,10 @@ use {
reqwest::{self, header::CONTENT_TYPE},
serde_json::{json, Value},
solana_account_decoder::UiAccount,
solana_client::{
connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE},
tpu_client::{TpuClient, TpuClientConfig},
},
solana_pubsub_client::nonblocking::pubsub_client::PubsubClient,
solana_rpc_client::rpc_client::RpcClient,
solana_rpc_client_api::{
@ -25,10 +29,6 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
solana_tpu_client::{
connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE},
tpu_client::{TpuClient, TpuClientConfig},
},
solana_transaction_status::TransactionStatus,
std::{
collections::HashSet,
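For most callers the crate split is invisible beyond the import path, since solana-client re-exports the cache and clients. A sketch of the pattern this test uses, assuming the 1.15 `ConnectionCache::new(pool_size)` and `TpuClient::new_with_connection_cache(..)` constructors (URLs are placeholders):

use {
    solana_client::{
        connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE},
        tpu_client::{TpuClient, TpuClientConfig},
    },
    solana_rpc_client::rpc_client::RpcClient,
    std::sync::Arc,
};

fn make_tpu_client() -> TpuClient {
    let rpc_client = Arc::new(RpcClient::new("http://127.0.0.1:8899".to_string()));
    let connection_cache = Arc::new(ConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE));
    TpuClient::new_with_connection_cache(
        rpc_client,
        "ws://127.0.0.1:8900",
        TpuClientConfig::default(),
        connection_cache,
    )
    .expect("tpu client")
}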

View File

@ -30,6 +30,7 @@ serde_derive = "1.0.103"
serde_json = "1.0.83"
soketto = "0.7"
solana-account-decoder = { path = "../account-decoder", version = "=1.15.0" }
solana-client = { path = "../client", version = "=1.15.0" }
solana-entry = { path = "../entry", version = "=1.15.0" }
solana-faucet = { path = "../faucet", version = "=1.15.0" }
solana-gossip = { path = "../gossip", version = "=1.15.0" }

View File

@ -13,6 +13,7 @@ use {
parse_token::{is_known_spl_token_id, token_amount_to_ui_amount, UiTokenAmount},
UiAccount, UiAccountEncoding, UiDataSliceConfig, MAX_BASE58_BYTES,
},
solana_client::connection_cache::ConnectionCache,
solana_entry::entry::Entry,
solana_faucet::faucet::request_airdrop_transaction,
solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
@ -81,7 +82,6 @@ use {
solana_stake_program,
solana_storage_bigtable::Error as StorageError,
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::connection_cache::ConnectionCache,
solana_transaction_status::{
BlockEncodingOptions, ConfirmedBlock, ConfirmedTransactionStatusWithSignature,
ConfirmedTransactionWithStatusMeta, EncodedConfirmedTransactionWithStatusMeta, Reward,

View File

@ -19,6 +19,7 @@ use {
RequestMiddlewareAction, ServerBuilder,
},
regex::Regex,
solana_client::connection_cache::ConnectionCache,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
bigtable_upload::ConfirmedBlockUploadConfig,
@ -40,7 +41,6 @@ use {
},
solana_send_transaction_service::send_transaction_service::{self, SendTransactionService},
solana_storage_bigtable::CredentialType,
solana_tpu_client::connection_cache::ConnectionCache,
std::{
collections::HashSet,
net::SocketAddr,

View File

@ -12,6 +12,7 @@ edition = "2021"
[dependencies]
crossbeam-channel = "0.5"
log = "0.4.17"
solana-client = { path = "../client", version = "=1.15.0" }
solana-measure = { path = "../measure", version = "=1.15.0" }
solana-metrics = { path = "../metrics", version = "=1.15.0" }
solana-runtime = { path = "../runtime", version = "=1.15.0" }

View File

@ -2,6 +2,7 @@ use {
crate::tpu_info::TpuInfo,
crossbeam_channel::{Receiver, RecvTimeoutError},
log::*,
solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_measure::measure::Measure,
solana_metrics::datapoint_warn,
solana_runtime::{bank::Bank, bank_forks::BankForks},
@ -9,7 +10,6 @@ use {
hash::Hash, nonce_account, pubkey::Pubkey, saturating_add_assign, signature::Signature,
timing::AtomicInterval, transport::TransportError,
},
solana_tpu_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
std::{
collections::{
hash_map::{Entry, HashMap},
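The service-side pattern stays the same after the move: fetch a pooled connection for the leader's TPU address and hand off the serialized batch. A hedged sketch (`forward_batch` and its arguments are illustrative stand-ins, not the service's actual helper):

use {
    solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
    solana_sdk::transport::Result as TransportResult,
    std::net::SocketAddr,
};

fn forward_batch(
    connection_cache: &ConnectionCache,
    tpu_address: &SocketAddr,
    wire_transactions: Vec<Vec<u8>>,
) -> TransportResult<()> {
    // Reuses a cached connection for this address when one exists.
    let conn = connection_cache.get_connection(tpu_address);
    conn.send_wire_transaction_batch_async(wire_transactions)
}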

View File

@ -44,7 +44,7 @@ use {
signature::{read_keypair_file, write_keypair_file, Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::connection_cache::{
solana_tpu_client::tpu_connection_cache::{
DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC,
},
std::{

View File

@ -25,7 +25,10 @@ use {
transaction::{self, Transaction, VersionedTransaction},
transport::Result as TransportResult,
},
solana_tpu_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection},
solana_tpu_client::{
tpu_connection::TpuConnection,
tpu_connection_cache::{ConnectionPool, TpuConnectionCache},
},
std::{
io,
net::SocketAddr,
@ -37,101 +40,94 @@ use {
},
};
struct ClientOptimizer {
cur_index: AtomicUsize,
experiment_index: AtomicUsize,
experiment_done: AtomicBool,
times: RwLock<Vec<u64>>,
num_clients: usize,
}
pub mod temporary_pub {
use super::*;
fn min_index(array: &[u64]) -> (u64, usize) {
let mut min_time = std::u64::MAX;
let mut min_index = 0;
for (i, time) in array.iter().enumerate() {
if *time < min_time {
min_time = *time;
min_index = i;
}
}
(min_time, min_index)
}
impl ClientOptimizer {
fn new(num_clients: usize) -> Self {
Self {
cur_index: AtomicUsize::new(0),
experiment_index: AtomicUsize::new(0),
experiment_done: AtomicBool::new(false),
times: RwLock::new(vec![std::u64::MAX; num_clients]),
num_clients,
}
pub struct ClientOptimizer {
cur_index: AtomicUsize,
experiment_index: AtomicUsize,
experiment_done: AtomicBool,
times: RwLock<Vec<u64>>,
num_clients: usize,
}
fn experiment(&self) -> usize {
if self.experiment_index.load(Ordering::Relaxed) < self.num_clients {
let old = self.experiment_index.fetch_add(1, Ordering::Relaxed);
if old < self.num_clients {
old
impl ClientOptimizer {
pub fn new(num_clients: usize) -> Self {
Self {
cur_index: AtomicUsize::new(0),
experiment_index: AtomicUsize::new(0),
experiment_done: AtomicBool::new(false),
times: RwLock::new(vec![std::u64::MAX; num_clients]),
num_clients,
}
}
pub fn experiment(&self) -> usize {
if self.experiment_index.load(Ordering::Relaxed) < self.num_clients {
let old = self.experiment_index.fetch_add(1, Ordering::Relaxed);
if old < self.num_clients {
old
} else {
self.best()
}
} else {
self.best()
}
} else {
self.best()
}
}
fn report(&self, index: usize, time_ms: u64) {
if self.num_clients > 1
&& (!self.experiment_done.load(Ordering::Relaxed) || time_ms == std::u64::MAX)
{
trace!(
"report {} with {} exp: {}",
index,
time_ms,
self.experiment_index.load(Ordering::Relaxed)
);
self.times.write().unwrap()[index] = time_ms;
if index == (self.num_clients - 1) || time_ms == std::u64::MAX {
let times = self.times.read().unwrap();
let (min_time, min_index) = min_index(&times);
pub fn report(&self, index: usize, time_ms: u64) {
if self.num_clients > 1
&& (!self.experiment_done.load(Ordering::Relaxed) || time_ms == std::u64::MAX)
{
trace!(
"done experimenting min: {} time: {} times: {:?}",
min_index,
min_time,
times
"report {} with {} exp: {}",
index,
time_ms,
self.experiment_index.load(Ordering::Relaxed)
);
// Only 1 thread should grab the num_clients-1 index, so this should be ok.
self.cur_index.store(min_index, Ordering::Relaxed);
self.experiment_done.store(true, Ordering::Relaxed);
self.times.write().unwrap()[index] = time_ms;
if index == (self.num_clients - 1) || time_ms == std::u64::MAX {
let times = self.times.read().unwrap();
let (min_time, min_index) = min_index(&times);
trace!(
"done experimenting min: {} time: {} times: {:?}",
min_index,
min_time,
times
);
// Only 1 thread should grab the num_clients-1 index, so this should be ok.
self.cur_index.store(min_index, Ordering::Relaxed);
self.experiment_done.store(true, Ordering::Relaxed);
}
}
}
}
fn best(&self) -> usize {
self.cur_index.load(Ordering::Relaxed)
pub fn best(&self) -> usize {
self.cur_index.load(Ordering::Relaxed)
}
}
}
use temporary_pub::*;
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
pub struct ThinClient<P: ConnectionPool> {
rpc_clients: Vec<RpcClient>,
tpu_addrs: Vec<SocketAddr>,
optimizer: ClientOptimizer,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
}
impl ThinClient {
impl<P: ConnectionPool> ThinClient<P> {
/// Create a new ThinClient that will interface with the Rpc at `rpc_addr` using TCP
/// and the Tpu at `tpu_addr` over `transactions_socket` using Quic or UDP
/// (currently hardcoded to UDP)
pub fn new(
rpc_addr: SocketAddr,
tpu_addr: SocketAddr,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
) -> Self {
Self::new_from_client(RpcClient::new_socket(rpc_addr), tpu_addr, connection_cache)
}
@ -140,7 +136,7 @@ impl ThinClient {
rpc_addr: SocketAddr,
tpu_addr: SocketAddr,
timeout: Duration,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
) -> Self {
let rpc_client = RpcClient::new_socket_with_timeout(rpc_addr, timeout);
Self::new_from_client(rpc_client, tpu_addr, connection_cache)
@ -149,7 +145,7 @@ impl ThinClient {
fn new_from_client(
rpc_client: RpcClient,
tpu_addr: SocketAddr,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
) -> Self {
Self {
rpc_clients: vec![rpc_client],
@ -162,7 +158,7 @@ impl ThinClient {
pub fn new_from_addrs(
rpc_addrs: Vec<SocketAddr>,
tpu_addrs: Vec<SocketAddr>,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
) -> Self {
assert!(!rpc_addrs.is_empty());
assert_eq!(rpc_addrs.len(), tpu_addrs.len());
@ -320,13 +316,13 @@ impl ThinClient {
}
}
impl Client for ThinClient {
impl<P: ConnectionPool> Client for ThinClient<P> {
fn tpu_addr(&self) -> String {
self.tpu_addr().to_string()
}
}
impl SyncClient for ThinClient {
impl<P: ConnectionPool> SyncClient for ThinClient<P> {
fn send_and_confirm_message<T: Signers>(
&self,
keypairs: &T,
@ -606,7 +602,7 @@ impl SyncClient for ThinClient {
}
}
impl AsyncClient for ThinClient {
impl<P: ConnectionPool> AsyncClient for ThinClient<P> {
fn async_send_versioned_transaction(
&self,
transaction: VersionedTransaction,
@ -626,6 +622,18 @@ impl AsyncClient for ThinClient {
}
}
fn min_index(array: &[u64]) -> (u64, usize) {
let mut min_time = std::u64::MAX;
let mut min_index = 0;
for (i, time) in array.iter().enumerate() {
if *time < min_time {
min_time = *time;
min_index = i;
}
}
(min_time, min_index)
}
#[cfg(test)]
mod tests {
use {super::*, rayon::prelude::*};
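With the generic in place, callers must pick a concrete pool type. A sketch of threading the parameter through, using only the `ThinClient::new` constructor shown above (the helper itself is hypothetical):

use {
    solana_thin_client::thin_client::ThinClient,
    solana_tpu_client::tpu_connection_cache::{ConnectionPool, TpuConnectionCache},
    std::{net::SocketAddr, sync::Arc},
};

// Callers supply a pool (e.g. a QUIC- or UDP-backed ConnectionPool impl);
// ThinClient stays generic over it.
fn make_thin_client<P: ConnectionPool>(
    rpc_addr: SocketAddr,
    tpu_addr: SocketAddr,
    connection_cache: Arc<TpuConnectionCache<P>>,
) -> ThinClient<P> {
    ThinClient::new(rpc_addr, tpu_addr, connection_cache)
}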

View File

@ -10,23 +10,14 @@ documentation = "https://docs.rs/solana-tpu-client"
edition = "2021"
[dependencies]
async-mutex = "1.4.0"
async-trait = "0.1.57"
bincode = "1.3.3"
enum_dispatch = "0.3.8"
futures = "0.3"
futures-util = "0.3.21"
indexmap = "1.9.1"
indicatif = { version = "0.17.1", optional = true }
itertools = "0.10.5"
lazy_static = "1.4.0"
log = "0.4.17"
quinn = "0.8.4"
quinn-proto = "0.8.4"
quinn-udp = "0.1.3"
rand = "0.7.0"
rayon = "1.5.3"
rustls = { version = "0.20.6", features = ["dangerous_configuration"] }
solana-measure = { path = "../measure", version = "=1.15.0" }
solana-metrics = { path = "../metrics", version = "=1.15.0" }
solana-net-utils = { path = "../net-utils", version = "=1.15.0" }
@ -34,15 +25,12 @@ solana-pubsub-client = { path = "../pubsub-client", version = "=1.15.0" }
solana-rpc-client = { path = "../rpc-client", version = "=1.15.0", default-features = false }
solana-rpc-client-api = { path = "../rpc-client-api", version = "=1.15.0" }
solana-sdk = { path = "../sdk", version = "=1.15.0" }
solana-streamer = { path = "../streamer", version = "=1.15.0" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
crossbeam-channel = "0.5"
rand_chacha = "0.2.2"
solana-logger = { path = "../logger", version = "=1.15.0" }
solana-perf = { path = "../perf", version = "=1.15.0" }
[features]
default = ["spinner"]

View File

@ -5,25 +5,25 @@ use {
#[derive(Default)]
pub struct ConnectionCacheStats {
pub(crate) cache_hits: AtomicU64,
pub(crate) cache_misses: AtomicU64,
pub(crate) cache_evictions: AtomicU64,
pub(crate) eviction_time_ms: AtomicU64,
pub(crate) sent_packets: AtomicU64,
pub(crate) total_batches: AtomicU64,
pub(crate) batch_success: AtomicU64,
pub(crate) batch_failure: AtomicU64,
pub(crate) get_connection_ms: AtomicU64,
pub(crate) get_connection_lock_ms: AtomicU64,
pub(crate) get_connection_hit_ms: AtomicU64,
pub(crate) get_connection_miss_ms: AtomicU64,
pub cache_hits: AtomicU64,
pub cache_misses: AtomicU64,
pub cache_evictions: AtomicU64,
pub eviction_time_ms: AtomicU64,
pub sent_packets: AtomicU64,
pub total_batches: AtomicU64,
pub batch_success: AtomicU64,
pub batch_failure: AtomicU64,
pub get_connection_ms: AtomicU64,
pub get_connection_lock_ms: AtomicU64,
pub get_connection_hit_ms: AtomicU64,
pub get_connection_miss_ms: AtomicU64,
// Need to track these separately per-connection
// because we need to track the base stat value from quinn
pub total_client_stats: ClientStats,
}
pub(crate) const CONNECTION_STAT_SUBMISSION_INTERVAL: u64 = 2000;
pub const CONNECTION_STAT_SUBMISSION_INTERVAL: u64 = 2000;
impl ConnectionCacheStats {
pub fn add_client_stats(
@ -70,7 +70,7 @@ impl ConnectionCacheStats {
}
}
pub(crate) fn report(&self) {
pub fn report(&self) {
datapoint_info!(
"quic-client-connection-stats",
(

View File

@ -1,13 +1,10 @@
#![allow(clippy::integer_arithmetic)]
pub mod connection_cache;
pub mod connection_cache_stats;
pub mod nonblocking;
pub mod quic_client;
pub mod tpu_client;
pub mod tpu_connection;
pub mod tpu_connection_cache;
pub mod udp_client;
#[macro_use]
extern crate solana_metrics;

View File

@ -1,4 +1,2 @@
pub mod quic_client;
pub mod tpu_client;
pub mod tpu_connection;
pub mod udp_client;

View File

@ -1,598 +0,0 @@
//! Simple nonblocking client that connects to a given UDP port with the QUIC protocol
//! and provides an interface for sending transactions which is restricted by the
//! server's flow control.
use {
crate::{
connection_cache_stats::ConnectionCacheStats, nonblocking::tpu_connection::TpuConnection,
tpu_connection::ClientStats,
},
async_mutex::Mutex,
async_trait::async_trait,
futures::future::join_all,
itertools::Itertools,
log::*,
quinn::{
ClientConfig, ConnectError, ConnectionError, Endpoint, EndpointConfig, IdleTimeout,
NewConnection, VarInt, WriteError,
},
solana_measure::measure::Measure,
solana_net_utils::VALIDATOR_PORT_RANGE,
solana_rpc_client_api::client_error::ErrorKind as ClientErrorKind,
solana_sdk::{
quic::{
QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS, QUIC_KEEP_ALIVE_MS, QUIC_MAX_TIMEOUT_MS,
QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS,
},
signature::Keypair,
transport::Result as TransportResult,
},
solana_streamer::{
nonblocking::quic::ALPN_TPU_PROTOCOL_ID,
tls_certificates::new_self_signed_tls_certificate_chain,
},
std::{
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{atomic::Ordering, Arc},
thread,
time::Duration,
},
thiserror::Error,
tokio::{sync::RwLock, time::timeout},
};
struct SkipServerVerification;
impl SkipServerVerification {
pub fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
pub struct QuicClientCertificate {
pub certificates: Vec<rustls::Certificate>,
pub key: rustls::PrivateKey,
}
/// A lazy-initialized Quic Endpoint
pub struct QuicLazyInitializedEndpoint {
endpoint: RwLock<Option<Arc<Endpoint>>>,
client_certificate: Arc<QuicClientCertificate>,
}
#[derive(Error, Debug)]
pub enum QuicError {
#[error(transparent)]
WriteError(#[from] WriteError),
#[error(transparent)]
ConnectionError(#[from] ConnectionError),
#[error(transparent)]
ConnectError(#[from] ConnectError),
}
impl From<QuicError> for ClientErrorKind {
fn from(quic_error: QuicError) -> Self {
Self::Custom(format!("{:?}", quic_error))
}
}
impl QuicLazyInitializedEndpoint {
pub fn new(client_certificate: Arc<QuicClientCertificate>) -> Self {
Self {
endpoint: RwLock::new(None),
client_certificate,
}
}
fn create_endpoint(&self) -> Endpoint {
let (_, client_socket) = solana_net_utils::bind_in_range(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
VALIDATOR_PORT_RANGE,
)
.expect("QuicLazyInitializedEndpoint::create_endpoint bind_in_range");
let mut crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_single_cert(
self.client_certificate.certificates.clone(),
self.client_certificate.key.clone(),
)
.expect("Failed to set QUIC client certificates");
crypto.enable_early_data = true;
crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()];
let mut endpoint =
QuicNewConnection::create_endpoint(EndpointConfig::default(), client_socket);
let mut config = ClientConfig::new(Arc::new(crypto));
let transport_config = Arc::get_mut(&mut config.transport)
.expect("QuicLazyInitializedEndpoint::create_endpoint Arc::get_mut");
let timeout = IdleTimeout::from(VarInt::from_u32(QUIC_MAX_TIMEOUT_MS));
transport_config.max_idle_timeout(Some(timeout));
transport_config.keep_alive_interval(Some(Duration::from_millis(QUIC_KEEP_ALIVE_MS)));
endpoint.set_default_client_config(config);
endpoint
}
async fn get_endpoint(&self) -> Arc<Endpoint> {
let lock = self.endpoint.read().await;
let endpoint = lock.as_ref();
match endpoint {
Some(endpoint) => endpoint.clone(),
None => {
drop(lock);
let mut lock = self.endpoint.write().await;
let endpoint = lock.as_ref();
match endpoint {
Some(endpoint) => endpoint.clone(),
None => {
let connection = Arc::new(self.create_endpoint());
*lock = Some(connection.clone());
connection
}
}
}
}
}
}
impl Default for QuicLazyInitializedEndpoint {
fn default() -> Self {
let (certs, priv_key) = new_self_signed_tls_certificate_chain(
&Keypair::new(),
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
)
.expect("Failed to create QUIC client certificate");
Self::new(Arc::new(QuicClientCertificate {
certificates: certs,
key: priv_key,
}))
}
}
/// A wrapper over NewConnection with additional capability to create the endpoint as part
/// of creating a new connection.
#[derive(Clone)]
struct QuicNewConnection {
endpoint: Arc<Endpoint>,
connection: Arc<NewConnection>,
}
impl QuicNewConnection {
/// Create a QuicNewConnection given the remote address 'addr'.
async fn make_connection(
endpoint: Arc<QuicLazyInitializedEndpoint>,
addr: SocketAddr,
stats: &ClientStats,
) -> Result<Self, QuicError> {
let mut make_connection_measure = Measure::start("make_connection_measure");
let endpoint = endpoint.get_endpoint().await;
let connecting = endpoint.connect(addr, "connect")?;
stats.total_connections.fetch_add(1, Ordering::Relaxed);
if let Ok(connecting_result) = timeout(
Duration::from_millis(QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS),
connecting,
)
.await
{
if connecting_result.is_err() {
stats.connection_errors.fetch_add(1, Ordering::Relaxed);
}
make_connection_measure.stop();
stats
.make_connection_ms
.fetch_add(make_connection_measure.as_ms(), Ordering::Relaxed);
let connection = connecting_result?;
Ok(Self {
endpoint,
connection: Arc::new(connection),
})
} else {
Err(ConnectionError::TimedOut.into())
}
}
fn create_endpoint(config: EndpointConfig, client_socket: UdpSocket) -> Endpoint {
quinn::Endpoint::new(config, None, client_socket)
.expect("QuicNewConnection::create_endpoint quinn::Endpoint::new")
.0
}
// Attempts to make a faster connection by taking advantage of pre-existing key material.
// Only works if connection to this endpoint was previously established.
async fn make_connection_0rtt(
&mut self,
addr: SocketAddr,
stats: &ClientStats,
) -> Result<Arc<NewConnection>, QuicError> {
let connecting = self.endpoint.connect(addr, "connect")?;
stats.total_connections.fetch_add(1, Ordering::Relaxed);
let connection = match connecting.into_0rtt() {
Ok((connection, zero_rtt)) => {
if let Ok(zero_rtt) = timeout(
Duration::from_millis(QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS),
zero_rtt,
)
.await
{
if zero_rtt {
stats.zero_rtt_accepts.fetch_add(1, Ordering::Relaxed);
} else {
stats.zero_rtt_rejects.fetch_add(1, Ordering::Relaxed);
}
connection
} else {
return Err(ConnectionError::TimedOut.into());
}
}
Err(connecting) => {
stats.connection_errors.fetch_add(1, Ordering::Relaxed);
if let Ok(connecting_result) = timeout(
Duration::from_millis(QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS),
connecting,
)
.await
{
connecting_result?
} else {
return Err(ConnectionError::TimedOut.into());
}
}
};
self.connection = Arc::new(connection);
Ok(self.connection.clone())
}
}
pub struct QuicClient {
endpoint: Arc<QuicLazyInitializedEndpoint>,
connection: Arc<Mutex<Option<QuicNewConnection>>>,
addr: SocketAddr,
stats: Arc<ClientStats>,
chunk_size: usize,
}
impl QuicClient {
pub fn new(
endpoint: Arc<QuicLazyInitializedEndpoint>,
addr: SocketAddr,
chunk_size: usize,
) -> Self {
Self {
endpoint,
connection: Arc::new(Mutex::new(None)),
addr,
stats: Arc::new(ClientStats::default()),
chunk_size,
}
}
async fn _send_buffer_using_conn(
data: &[u8],
connection: &NewConnection,
) -> Result<(), QuicError> {
let mut send_stream = connection.connection.open_uni().await?;
send_stream.write_all(data).await?;
send_stream.finish().await?;
Ok(())
}
// Attempts to send data, connecting/reconnecting as necessary
// On success, returns the connection used to successfully send the data
async fn _send_buffer(
&self,
data: &[u8],
stats: &ClientStats,
connection_stats: Arc<ConnectionCacheStats>,
) -> Result<Arc<NewConnection>, QuicError> {
let mut connection_try_count = 0;
let mut last_connection_id = 0;
let mut last_error = None;
while connection_try_count < 2 {
let connection = {
let mut conn_guard = self.connection.lock().await;
let maybe_conn = conn_guard.as_mut();
match maybe_conn {
Some(conn) => {
if conn.connection.connection.stable_id() == last_connection_id {
// this is the problematic connection we had used before, create a new one
let conn = conn.make_connection_0rtt(self.addr, stats).await;
match conn {
Ok(conn) => {
info!(
"Made 0rtt connection to {} with id {} try_count {}, last_connection_id: {}, last_error: {:?}",
self.addr,
conn.connection.stable_id(),
connection_try_count,
last_connection_id,
last_error,
);
connection_try_count += 1;
conn
}
Err(err) => {
info!(
"Cannot make 0rtt connection to {}, error {:}",
self.addr, err
);
return Err(err);
}
}
} else {
stats.connection_reuse.fetch_add(1, Ordering::Relaxed);
conn.connection.clone()
}
}
None => {
let conn = QuicNewConnection::make_connection(
self.endpoint.clone(),
self.addr,
stats,
)
.await;
match conn {
Ok(conn) => {
*conn_guard = Some(conn.clone());
info!(
"Made connection to {} id {} try_count {}",
self.addr,
conn.connection.connection.stable_id(),
connection_try_count
);
connection_try_count += 1;
conn.connection.clone()
}
Err(err) => {
info!("Cannot make connection to {}, error {:}", self.addr, err);
return Err(err);
}
}
}
}
};
let new_stats = connection.connection.stats();
connection_stats
.total_client_stats
.congestion_events
.update_stat(
&self.stats.congestion_events,
new_stats.path.congestion_events,
);
connection_stats
.total_client_stats
.tx_streams_blocked_uni
.update_stat(
&self.stats.tx_streams_blocked_uni,
new_stats.frame_tx.streams_blocked_uni,
);
connection_stats
.total_client_stats
.tx_data_blocked
.update_stat(&self.stats.tx_data_blocked, new_stats.frame_tx.data_blocked);
connection_stats
.total_client_stats
.tx_acks
.update_stat(&self.stats.tx_acks, new_stats.frame_tx.acks);
last_connection_id = connection.connection.stable_id();
match Self::_send_buffer_using_conn(data, &connection).await {
Ok(()) => {
return Ok(connection);
}
Err(err) => match err {
QuicError::ConnectionError(_) => {
last_error = Some(err);
}
_ => {
info!(
"Error sending to {} with id {}, error {:?} thread: {:?}",
self.addr,
connection.connection.stable_id(),
err,
thread::current().id(),
);
return Err(err);
}
},
}
}
// if we come here, that means we have exhausted maximum retries, return the error
info!(
"Ran into an error sending transactions {:?}, exhausted retries to {}",
last_error, self.addr
);
// If we get here but last_error is None, then we have a logic error
// in this function, so panic here with an expect to help debugging
Err(last_error.expect("QuicClient::_send_buffer last_error.expect"))
}
pub async fn send_buffer<T>(
&self,
data: T,
stats: &ClientStats,
connection_stats: Arc<ConnectionCacheStats>,
) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
self._send_buffer(data.as_ref(), stats, connection_stats)
.await
.map_err(Into::<ClientErrorKind>::into)?;
Ok(())
}
pub async fn send_batch<T>(
&self,
buffers: &[T],
stats: &ClientStats,
connection_stats: Arc<ConnectionCacheStats>,
) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
        // Start by "testing" the connection: send the first transaction.
        // This connects to the server if we are not already connected, and
        // reconnects and retries if the first send attempt fails (for
        // example, due to a timed-out connection), returning either an error
        // or the connection that successfully sent the transaction.
        // We then use that connection for the rest of the batch, which avoids
        // touching the mutex in self, and we do not bother reconnecting if we
        // fail along the way: testing, even in the ideal GCE environment,
        // found no case where reconnecting and retrying in the middle of a
        // batch send (i.e. after a connection error that presumably cannot be
        // due to a timed-out connection) succeeded.
if buffers.is_empty() {
return Ok(());
}
let connection = self
._send_buffer(buffers[0].as_ref(), stats, connection_stats)
.await
.map_err(Into::<ClientErrorKind>::into)?;
// Used to avoid dereferencing the Arc multiple times below
// by just getting a reference to the NewConnection once
let connection_ref: &NewConnection = &connection;
let chunks = buffers[1..buffers.len()].iter().chunks(self.chunk_size);
let futures: Vec<_> = chunks
.into_iter()
.map(|buffs| {
join_all(
buffs
.into_iter()
.map(|buf| Self::_send_buffer_using_conn(buf.as_ref(), connection_ref)),
)
})
.collect();
for f in futures {
f.await
.into_iter()
.try_for_each(|res| res)
.map_err(Into::<ClientErrorKind>::into)?;
}
Ok(())
}
pub fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
pub fn stats(&self) -> Arc<ClientStats> {
self.stats.clone()
}
}
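// Editor's note: a minimal, hedged usage sketch of the async API above; it is
// not part of this commit. It assumes a tokio context, that
// ConnectionCacheStats implements Default, and uses opaque byte buffers in
// place of real serialized transactions.
async fn example_send_batch(client: &QuicClient) -> Result<(), ClientErrorKind> {
    let stats = ClientStats::default();
    let connection_stats = Arc::new(ConnectionCacheStats::default());
    // The first buffer "tests" (and, if necessary, establishes) the
    // connection; the remaining buffers reuse it in parallel chunks.
    let wire_transactions: Vec<Vec<u8>> = vec![vec![0u8; 64], vec![1u8; 64]];
    client
        .send_batch(&wire_transactions, &stats, connection_stats)
        .await
}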
pub struct QuicTpuConnection {
client: Arc<QuicClient>,
connection_stats: Arc<ConnectionCacheStats>,
}
impl QuicTpuConnection {
pub fn base_stats(&self) -> Arc<ClientStats> {
self.client.stats()
}
pub fn connection_stats(&self) -> Arc<ConnectionCacheStats> {
self.connection_stats.clone()
}
pub fn new(
endpoint: Arc<QuicLazyInitializedEndpoint>,
addr: SocketAddr,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
let client = Arc::new(QuicClient::new(
endpoint,
addr,
QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS,
));
Self::new_with_client(client, connection_stats)
}
pub fn new_with_client(
client: Arc<QuicClient>,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
Self {
client,
connection_stats,
}
}
}
#[async_trait]
impl TpuConnection for QuicTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
self.client.tpu_addr()
}
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let stats = ClientStats::default();
let len = buffers.len();
let res = self
.client
.send_batch(buffers, &stats, self.connection_stats.clone())
.await;
self.connection_stats
.add_client_stats(&stats, len, res.is_ok());
res?;
Ok(())
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let stats = Arc::new(ClientStats::default());
let send_buffer =
self.client
.send_buffer(wire_transaction, &stats, self.connection_stats.clone());
if let Err(e) = send_buffer.await {
warn!(
"Failed to send transaction async to {}, error: {:?} ",
self.tpu_addr(),
e
);
datapoint_warn!("send-wire-async", ("failure", 1, i64),);
self.connection_stats.add_client_stats(&stats, 1, false);
} else {
self.connection_stats.add_client_stats(&stats, 1, true);
}
Ok(())
}
}
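// Editor's note: a hedged sketch (not from this diff) of constructing the
// nonblocking connection and sending one wire transaction; the endpoint and
// address are assumed to come from the caller, and ConnectionCacheStats is
// assumed to implement Default.
async fn example_send_one(
    endpoint: Arc<QuicLazyInitializedEndpoint>,
    addr: SocketAddr,
) -> TransportResult<()> {
    let connection_stats = Arc::new(ConnectionCacheStats::default());
    let conn = QuicTpuConnection::new(endpoint, addr, connection_stats);
    let wire_transaction = vec![0u8; 64]; // an already-serialized transaction
    conn.send_wire_transaction(wire_transaction).await
}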

View File

@ -1,6 +1,6 @@
#[cfg(feature = "spinner")]
use {
crate::tpu_client::{SEND_TRANSACTION_INTERVAL, TRANSACTION_RESEND_INTERVAL},
crate::tpu_client::temporary_pub::{SEND_TRANSACTION_INTERVAL, TRANSACTION_RESEND_INTERVAL},
indicatif::ProgressBar,
solana_rpc_client::spinner,
solana_rpc_client_api::request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
@ -8,9 +8,11 @@ use {
};
use {
crate::{
connection_cache::ConnectionCache,
nonblocking::tpu_connection::TpuConnection,
tpu_client::{RecentLeaderSlots, TpuClientConfig, MAX_FANOUT_SLOTS},
tpu_connection_cache::{
ConnectionPool, TpuConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE,
},
},
bincode::serialize,
futures_util::{future::join_all, stream::StreamExt},
@ -46,6 +48,37 @@ use {
},
};
pub mod temporary_pub {
use super::*;
pub type Result<T> = std::result::Result<T, TpuSenderError>;
#[cfg(feature = "spinner")]
pub fn set_message_for_confirmed_transactions(
progress_bar: &ProgressBar,
confirmed_transactions: u32,
total_transactions: usize,
block_height: Option<u64>,
last_valid_block_height: u64,
status: &str,
) {
progress_bar.set_message(format!(
"{:>5.1}% | {:<40}{}",
confirmed_transactions as f64 * 100. / total_transactions as f64,
status,
match block_height {
Some(block_height) => format!(
" [block height {}; re-sign in {} blocks]",
block_height,
last_valid_block_height.saturating_sub(block_height),
),
None => String::new(),
},
));
}
}
use temporary_pub::*;
#[derive(Error, Debug)]
pub enum TpuSenderError {
#[error("Pubsub error: {0:?}")]
@ -61,9 +94,9 @@ pub enum TpuSenderError {
}
struct LeaderTpuCacheUpdateInfo {
maybe_cluster_nodes: Option<ClientResult<Vec<RpcContactInfo>>>,
maybe_epoch_info: Option<ClientResult<EpochInfo>>,
maybe_slot_leaders: Option<ClientResult<Vec<Pubkey>>>,
pub(super) maybe_cluster_nodes: Option<ClientResult<Vec<RpcContactInfo>>>,
pub(super) maybe_epoch_info: Option<ClientResult<EpochInfo>>,
pub(super) maybe_slot_leaders: Option<ClientResult<Vec<Pubkey>>>,
}
impl LeaderTpuCacheUpdateInfo {
pub fn has_some(&self) -> bool {
@ -210,20 +243,18 @@ impl LeaderTpuCache {
}
}
type Result<T> = std::result::Result<T, TpuSenderError>;
/// Client which sends transactions directly to the current leader's TPU port over UDP.
/// The client uses RPC to determine the current leader and fetch node contact info
pub struct TpuClient {
pub struct TpuClient<P: ConnectionPool> {
fanout_slots: u64,
leader_tpu_service: LeaderTpuService,
exit: Arc<AtomicBool>,
rpc_client: Arc<RpcClient>,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
}
async fn send_wire_transaction_to_addr(
connection_cache: &ConnectionCache,
async fn send_wire_transaction_to_addr<P: ConnectionPool>(
connection_cache: &TpuConnectionCache<P>,
addr: &SocketAddr,
wire_transaction: Vec<u8>,
) -> TransportResult<()> {
@ -231,8 +262,8 @@ async fn send_wire_transaction_to_addr(
conn.send_wire_transaction(wire_transaction.clone()).await
}
async fn send_wire_transaction_batch_to_addr(
connection_cache: &ConnectionCache,
async fn send_wire_transaction_batch_to_addr<P: ConnectionPool>(
connection_cache: &TpuConnectionCache<P>,
addr: &SocketAddr,
wire_transactions: &[Vec<u8>],
) -> TransportResult<()> {
@ -240,7 +271,7 @@ async fn send_wire_transaction_batch_to_addr(
conn.send_wire_transaction_batch(wire_transactions).await
}
impl TpuClient {
impl<P: ConnectionPool> TpuClient<P> {
/// Serialize and send transaction to the current and upcoming leader TPUs according to fanout
/// size
pub async fn send_transaction(&self, transaction: &Transaction) -> bool {
@ -356,7 +387,8 @@ impl TpuClient {
websocket_url: &str,
config: TpuClientConfig,
) -> Result<Self> {
let connection_cache = Arc::new(ConnectionCache::default());
let connection_cache =
Arc::new(TpuConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE).unwrap()); // TODO: Handle error properly, as the TpuConnectionCache ctor is now fallible.
Self::new_with_connection_cache(rpc_client, websocket_url, config, connection_cache).await
}
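// Editor's note: a hedged sketch of handling the now-fallible constructor
// instead of the unwrap() above. It stays generic over ConnectionPool and
// assumes TpuSenderError has a Custom(String) variant; the Debug bound added
// to NewTpuConfig::ClientError later in this commit is what makes the
// format!("{:?}") below possible.
async fn example_new_client<P: ConnectionPool>(
    rpc_client: Arc<RpcClient>,
    websocket_url: &str,
) -> Result<TpuClient<P>> {
    let connection_cache = Arc::new(
        TpuConnectionCache::new(DEFAULT_TPU_CONNECTION_POOL_SIZE)
            .map_err(|err| TpuSenderError::Custom(format!("{err:?}")))?,
    );
    TpuClient::new_with_connection_cache(
        rpc_client,
        websocket_url,
        TpuClientConfig::default(),
        connection_cache,
    )
    .await
}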
@ -365,7 +397,7 @@ impl TpuClient {
rpc_client: Arc<RpcClient>,
websocket_url: &str,
config: TpuClientConfig,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
) -> Result<Self> {
let exit = Arc::new(AtomicBool::new(false));
let leader_tpu_service =
@ -519,7 +551,8 @@ impl TpuClient {
self.leader_tpu_service.join().await;
}
}
impl Drop for TpuClient {
impl<P: ConnectionPool> Drop for TpuClient<P> {
fn drop(&mut self) {
self.exit.store(true, Ordering::Relaxed);
}
@ -594,7 +627,7 @@ impl LeaderTpuService {
self.recent_slots.estimated_current_slot()
}
fn leader_tpu_sockets(&self, fanout_slots: u64) -> Vec<SocketAddr> {
pub fn leader_tpu_sockets(&self, fanout_slots: u64) -> Vec<SocketAddr> {
let current_slot = self.recent_slots.estimated_current_slot();
self.leader_tpu_cache
.read()
@ -721,27 +754,3 @@ async fn maybe_fetch_cache_info(
maybe_slot_leaders,
}
}
#[cfg(feature = "spinner")]
fn set_message_for_confirmed_transactions(
progress_bar: &ProgressBar,
confirmed_transactions: u32,
total_transactions: usize,
block_height: Option<u64>,
last_valid_block_height: u64,
status: &str,
) {
progress_bar.set_message(format!(
"{:>5.1}% | {:<40}{}",
confirmed_transactions as f64 * 100. / total_transactions as f64,
status,
match block_height {
Some(block_height) => format!(
" [block height {}; re-sign in {} blocks]",
block_height,
last_valid_block_height.saturating_sub(block_height),
),
None => String::new(),
},
));
}

View File

@ -1,24 +1,12 @@
//! Trait defining async send functions, to be used for UDP or QUIC sending
use {
crate::nonblocking::{quic_client::QuicTpuConnection, udp_client::UdpTpuConnection},
async_trait::async_trait,
enum_dispatch::enum_dispatch,
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
std::net::SocketAddr,
};
// Due to the existence of `crate::connection_cache::Connection`, if this is named
// `Connection`, enum_dispatch gets confused between the two and throws errors when
// trying to convert later.
#[enum_dispatch]
pub enum NonblockingConnection {
QuicTpuConnection,
UdpTpuConnection,
}
#[async_trait]
#[enum_dispatch(NonblockingConnection)]
pub trait TpuConnection {
fn tpu_addr(&self) -> &SocketAddr;
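// Editor's note: with enum_dispatch removed, call sites take the trait as a
// generic bound (or a concrete type) instead of matching on the deleted
// NonblockingConnection enum; a minimal hedged sketch:
async fn send_via<C: TpuConnection>(conn: &C, wire_transaction: Vec<u8>) -> TransportResult<()> {
    conn.send_wire_transaction(wire_transaction).await
}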

View File

@ -1,93 +0,0 @@
//! Simple UDP client that communicates with the given UDP port and provides
//! an interface for sending transactions
use {
crate::nonblocking::tpu_connection::TpuConnection, async_trait::async_trait,
core::iter::repeat, solana_sdk::transport::Result as TransportResult,
solana_streamer::nonblocking::sendmmsg::batch_send, std::net::SocketAddr,
tokio::net::UdpSocket,
};
pub struct UdpTpuConnection {
socket: UdpSocket,
addr: SocketAddr,
}
impl UdpTpuConnection {
pub fn new_from_addr(socket: std::net::UdpSocket, tpu_addr: SocketAddr) -> Self {
socket.set_nonblocking(true).unwrap();
let socket = UdpSocket::from_std(socket).unwrap();
Self {
socket,
addr: tpu_addr,
}
}
}
#[async_trait]
impl TpuConnection for UdpTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
self.socket
.send_to(wire_transaction.as_ref(), self.addr)
.await?;
Ok(())
}
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let pkts: Vec<_> = buffers.iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts).await?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use {
crate::nonblocking::{tpu_connection::TpuConnection, udp_client::UdpTpuConnection},
solana_sdk::packet::{Packet, PACKET_DATA_SIZE},
solana_streamer::nonblocking::recvmmsg::recv_mmsg,
std::net::{IpAddr, Ipv4Addr},
tokio::net::UdpSocket,
};
async fn check_send_one(connection: &UdpTpuConnection, reader: &UdpSocket) {
let packet = vec![111u8; PACKET_DATA_SIZE];
connection.send_wire_transaction(&packet).await.unwrap();
let mut packets = vec![Packet::default(); 32];
let recv = recv_mmsg(reader, &mut packets[..]).await.unwrap();
assert_eq!(1, recv);
}
async fn check_send_batch(connection: &UdpTpuConnection, reader: &UdpSocket) {
let packets: Vec<_> = (0..32).map(|_| vec![0u8; PACKET_DATA_SIZE]).collect();
connection
.send_wire_transaction_batch(&packets)
.await
.unwrap();
let mut packets = vec![Packet::default(); 32];
let recv = recv_mmsg(reader, &mut packets[..]).await.unwrap();
assert_eq!(32, recv);
}
#[tokio::test]
async fn test_send_from_addr() {
let addr_str = "0.0.0.0:50100";
let addr = addr_str.parse().unwrap();
let socket =
solana_net_utils::bind_with_any_port(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))).unwrap();
let connection = UdpTpuConnection::new_from_addr(socket, addr);
let reader = UdpSocket::bind(addr_str).await.expect("bind");
check_send_one(&connection, &reader).await;
check_send_batch(&connection, &reader).await;
}
}

View File

@ -1,187 +0,0 @@
//! Simple client that connects to a given UDP port using the QUIC protocol and
//! provides an interface for sending transactions that is subject to the
//! server's flow control.
use {
crate::{
connection_cache_stats::ConnectionCacheStats,
nonblocking::{
quic_client::{
QuicClient, QuicLazyInitializedEndpoint,
QuicTpuConnection as NonblockingQuicTpuConnection,
},
tpu_connection::TpuConnection as NonblockingTpuConnection,
},
tpu_connection::{ClientStats, TpuConnection},
},
lazy_static::lazy_static,
log::*,
solana_sdk::transport::{Result as TransportResult, TransportError},
std::{
net::SocketAddr,
sync::{atomic::Ordering, Arc, Condvar, Mutex, MutexGuard},
time::Duration,
},
tokio::{runtime::Runtime, time::timeout},
};
const MAX_OUTSTANDING_TASK: u64 = 2000;
const SEND_TRANSACTION_TIMEOUT_MS: u64 = 10000;
/// A semaphore used for limiting the number of asynchronous tasks spawned to
/// the runtime. Before spawning a task, call acquire. After the task is done
/// (be it success or failure), call release.
struct AsyncTaskSemaphore {
/// Keep the counter info about the usage
counter: Mutex<u64>,
/// Conditional variable for signaling when counter is decremented
cond_var: Condvar,
/// The maximum usage allowed by this semaphore.
permits: u64,
}
impl AsyncTaskSemaphore {
fn new(permits: u64) -> Self {
Self {
counter: Mutex::new(0),
cond_var: Condvar::new(),
permits,
}
}
    /// When this returns, the lock is held and the usage count has been
    /// incremented. When the returned MutexGuard is dropped, the lock is
    /// released without decrementing the usage count.
fn acquire(&self) -> MutexGuard<u64> {
let mut count = self.counter.lock().unwrap();
*count += 1;
while *count > self.permits {
count = self.cond_var.wait(count).unwrap();
}
count
}
/// Acquire the lock and decrement the usage count
fn release(&self) {
let mut count = self.counter.lock().unwrap();
*count -= 1;
self.cond_var.notify_one();
}
}
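// Editor's note: a sketch of the intended acquire/release protocol (it mirrors
// send_wire_transaction_async below):
//
//     let _lock = ASYNC_TASK_SEMAPHORE.acquire(); // count += 1; blocks while count > permits
//     RUNTIME.spawn(async move {
//         // ... perform the send ...
//         ASYNC_TASK_SEMAPHORE.release();         // count -= 1; wakes one waiter
//     });
//
// Dropping `_lock` releases the mutex but deliberately leaves the count
// incremented; only release() decrements it.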
lazy_static! {
static ref ASYNC_TASK_SEMAPHORE: AsyncTaskSemaphore =
AsyncTaskSemaphore::new(MAX_OUTSTANDING_TASK);
static ref RUNTIME: Runtime = tokio::runtime::Builder::new_multi_thread()
.thread_name("quic-client")
.enable_all()
.build()
.unwrap();
}
pub struct QuicTpuConnection {
inner: Arc<NonblockingQuicTpuConnection>,
}
impl QuicTpuConnection {
pub fn new(
endpoint: Arc<QuicLazyInitializedEndpoint>,
tpu_addr: SocketAddr,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
let inner = Arc::new(NonblockingQuicTpuConnection::new(
endpoint,
tpu_addr,
connection_stats,
));
Self { inner }
}
pub fn new_with_client(
client: Arc<QuicClient>,
connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
let inner = Arc::new(NonblockingQuicTpuConnection::new_with_client(
client,
connection_stats,
));
Self { inner }
}
}
async fn send_wire_transaction_async(
connection: Arc<NonblockingQuicTpuConnection>,
wire_transaction: Vec<u8>,
) -> TransportResult<()> {
let result = timeout(
Duration::from_millis(SEND_TRANSACTION_TIMEOUT_MS),
connection.send_wire_transaction(wire_transaction),
)
.await;
ASYNC_TASK_SEMAPHORE.release();
handle_send_result(result, connection)
}
async fn send_wire_transaction_batch_async(
connection: Arc<NonblockingQuicTpuConnection>,
buffers: Vec<Vec<u8>>,
) -> TransportResult<()> {
let time_out = SEND_TRANSACTION_TIMEOUT_MS * buffers.len() as u64;
let result = timeout(
Duration::from_millis(time_out),
connection.send_wire_transaction_batch(&buffers),
)
.await;
ASYNC_TASK_SEMAPHORE.release();
handle_send_result(result, connection)
}
/// Check the send result and update stats if the send timed out. Returns the checked result.
fn handle_send_result(
result: Result<Result<(), TransportError>, tokio::time::error::Elapsed>,
connection: Arc<NonblockingQuicTpuConnection>,
) -> Result<(), TransportError> {
match result {
Ok(result) => result,
Err(_err) => {
let client_stats = ClientStats::default();
client_stats.send_timeout.fetch_add(1, Ordering::Relaxed);
let stats = connection.connection_stats();
stats.add_client_stats(&client_stats, 0, false);
info!("Timedout sending transaction {:?}", connection.tpu_addr());
Err(TransportError::Custom(
"Timedout sending transaction".to_string(),
))
}
}
}
impl TpuConnection for QuicTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
self.inner.tpu_addr()
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
RUNTIME.block_on(self.inner.send_wire_transaction_batch(buffers))?;
Ok(())
}
fn send_wire_transaction_async(&self, wire_transaction: Vec<u8>) -> TransportResult<()> {
let _lock = ASYNC_TASK_SEMAPHORE.acquire();
let inner = self.inner.clone();
let _ = RUNTIME
.spawn(async move { send_wire_transaction_async(inner, wire_transaction).await });
Ok(())
}
fn send_wire_transaction_batch_async(&self, buffers: Vec<Vec<u8>>) -> TransportResult<()> {
let _lock = ASYNC_TASK_SEMAPHORE.acquire();
let inner = self.inner.clone();
let _ =
RUNTIME.spawn(async move { send_wire_transaction_batch_async(inner, buffers).await });
Ok(())
}
}

View File

@ -1,8 +1,8 @@
pub use crate::nonblocking::tpu_client::TpuSenderError;
use {
crate::{
connection_cache::ConnectionCache,
nonblocking::tpu_client::TpuClient as NonblockingTpuClient,
tpu_connection_cache::{ConnectionPool, TpuConnectionCache},
},
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_rpc_client::rpc_client::RpcClient,
@ -19,7 +19,19 @@ use {
tokio::time::Duration,
};
type Result<T> = std::result::Result<T, TpuSenderError>;
pub mod temporary_pub {
use super::*;
pub type Result<T> = std::result::Result<T, TpuSenderError>;
/// Send at ~100 TPS
#[cfg(feature = "spinner")]
pub const SEND_TRANSACTION_INTERVAL: Duration = Duration::from_millis(10);
/// Retry batch send after 4 seconds
#[cfg(feature = "spinner")]
pub const TRANSACTION_RESEND_INTERVAL: Duration = Duration::from_secs(4);
}
use temporary_pub::*;
/// Default number of slots used to build TPU socket fanout set
pub const DEFAULT_FANOUT_SLOTS: u64 = 12;
@ -27,13 +39,6 @@ pub const DEFAULT_FANOUT_SLOTS: u64 = 12;
/// Maximum number of slots used to build TPU socket fanout set
pub const MAX_FANOUT_SLOTS: u64 = 100;
/// Send at ~100 TPS
#[cfg(feature = "spinner")]
pub(crate) const SEND_TRANSACTION_INTERVAL: Duration = Duration::from_millis(10);
/// Retry batch send after 4 seconds
#[cfg(feature = "spinner")]
pub(crate) const TRANSACTION_RESEND_INTERVAL: Duration = Duration::from_secs(4);
/// Config params for `TpuClient`
#[derive(Clone, Debug)]
pub struct TpuClientConfig {
@ -52,14 +57,14 @@ impl Default for TpuClientConfig {
/// Client which sends transactions directly to the current leader's TPU port over UDP.
/// The client uses RPC to determine the current leader and fetch node contact info
pub struct TpuClient {
pub struct TpuClient<P: ConnectionPool> {
_deprecated: UdpSocket, // TpuClient now uses the connection_cache to choose a send_socket
//todo: get rid of this field
rpc_client: Arc<RpcClient>,
tpu_client: Arc<NonblockingTpuClient>,
tpu_client: Arc<NonblockingTpuClient<P>>,
}
impl TpuClient {
impl<P: ConnectionPool> TpuClient<P> {
/// Serialize and send transaction to the current and upcoming leader TPUs according to fanout
/// size
pub fn send_transaction(&self, transaction: &Transaction) -> bool {
@ -121,7 +126,7 @@ impl TpuClient {
rpc_client: Arc<RpcClient>,
websocket_url: &str,
config: TpuClientConfig,
connection_cache: Arc<ConnectionCache>,
connection_cache: Arc<TpuConnectionCache<P>>,
) -> Result<Self> {
let create_tpu_client = NonblockingTpuClient::new_with_connection_cache(
rpc_client.get_inner_client().clone(),

View File

@ -1,6 +1,4 @@
use {
crate::{quic_client::QuicTpuConnection, udp_client::UdpTpuConnection},
enum_dispatch::enum_dispatch,
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_metrics::MovingStat,
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
@ -24,13 +22,6 @@ pub struct ClientStats {
pub send_timeout: AtomicU64,
}
#[enum_dispatch]
pub enum BlockingConnection {
UdpTpuConnection,
QuicTpuConnection,
}
#[enum_dispatch(BlockingConnection)]
pub trait TpuConnection {
fn tpu_addr(&self) -> &SocketAddr;

View File

@ -16,7 +16,7 @@ use {
};
// Should be non-zero
pub(crate) static MAX_CONNECTIONS: usize = 1024;
pub static MAX_CONNECTIONS: usize = 1024;
/// Used to decide whether the TPU and underlying connection cache should use
/// QUIC connections.
@ -262,7 +262,7 @@ pub enum ConnectionPoolError {
}
pub trait NewTpuConfig {
type ClientError;
type ClientError: std::fmt::Debug;
fn new() -> Result<Self, Self::ClientError>
where
Self: Sized;
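// Editor's note: a hedged sketch of a config type satisfying the tightened
// trait bound; the names are illustrative, not taken from this diff. Requiring
// ClientError: Debug lets generic callers report construction failures, e.g.
// via expect() or format!("{:?}", err).
#[derive(Debug)]
struct ExampleConfigError(String);

struct ExampleTpuConfig;

impl NewTpuConfig for ExampleTpuConfig {
    type ClientError = ExampleConfigError; // must now implement Debug

    fn new() -> Result<Self, Self::ClientError> {
        Ok(ExampleTpuConfig)
    }
}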

View File

@ -10,6 +10,10 @@ documentation = "https://docs.rs/solana-udp-client"
edition = "2021"
[dependencies]
async-trait = "0.1.57"
solana-net-utils = { path = "../net-utils", version = "=1.15.0" }
solana-sdk = { path = "../sdk", version = "=1.15.0" }
solana-streamer = { path = "../streamer", version = "=1.15.0" }
solana-tpu-client = { path = "../tpu-client", version = "=1.15.0" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }

View File

@ -1,4 +1,93 @@
//! Simple UDP client that communicates with the given UDP port and provides
//! an interface for sending transactions
pub use solana_tpu_client::nonblocking::udp_client::*;
use {
async_trait::async_trait, core::iter::repeat, solana_sdk::transport::Result as TransportResult,
solana_streamer::nonblocking::sendmmsg::batch_send,
solana_tpu_client::nonblocking::tpu_connection::TpuConnection, std::net::SocketAddr,
tokio::net::UdpSocket,
};
pub struct UdpTpuConnection {
pub socket: UdpSocket,
pub addr: SocketAddr,
}
impl UdpTpuConnection {
pub fn new_from_addr(socket: std::net::UdpSocket, tpu_addr: SocketAddr) -> Self {
socket.set_nonblocking(true).unwrap();
let socket = UdpSocket::from_std(socket).unwrap();
Self {
socket,
addr: tpu_addr,
}
}
}
#[async_trait]
impl TpuConnection for UdpTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
async fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
self.socket
.send_to(wire_transaction.as_ref(), self.addr)
.await?;
Ok(())
}
async fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let pkts: Vec<_> = buffers.iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts).await?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use {
super::*,
solana_sdk::packet::{Packet, PACKET_DATA_SIZE},
solana_streamer::nonblocking::recvmmsg::recv_mmsg,
std::net::{IpAddr, Ipv4Addr},
tokio::net::UdpSocket,
};
async fn check_send_one(connection: &UdpTpuConnection, reader: &UdpSocket) {
let packet = vec![111u8; PACKET_DATA_SIZE];
connection.send_wire_transaction(&packet).await.unwrap();
let mut packets = vec![Packet::default(); 32];
let recv = recv_mmsg(reader, &mut packets[..]).await.unwrap();
assert_eq!(1, recv);
}
async fn check_send_batch(connection: &UdpTpuConnection, reader: &UdpSocket) {
let packets: Vec<_> = (0..32).map(|_| vec![0u8; PACKET_DATA_SIZE]).collect();
connection
.send_wire_transaction_batch(&packets)
.await
.unwrap();
let mut packets = vec![Packet::default(); 32];
let recv = recv_mmsg(reader, &mut packets[..]).await.unwrap();
assert_eq!(32, recv);
}
#[tokio::test]
async fn test_send_from_addr() {
let addr_str = "0.0.0.0:50100";
let addr = addr_str.parse().unwrap();
let socket =
solana_net_utils::bind_with_any_port(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))).unwrap();
let connection = UdpTpuConnection::new_from_addr(socket, addr);
let reader = UdpSocket::bind(addr_str).await.expect("bind");
check_send_one(&connection, &reader).await;
check_send_batch(&connection, &reader).await;
}
}

View File

@ -1,4 +1,63 @@
//! Simple TPU client that communicates with the given UDP port over UDP and
//! provides an interface for sending transactions
pub use solana_tpu_client::udp_client::*;
use {
core::iter::repeat,
solana_sdk::transport::Result as TransportResult,
solana_streamer::sendmmsg::batch_send,
solana_tpu_client::{
connection_cache_stats::ConnectionCacheStats, tpu_connection::TpuConnection,
},
std::{
net::{SocketAddr, UdpSocket},
sync::Arc,
},
};
pub struct UdpTpuConnection {
pub socket: Arc<UdpSocket>,
pub addr: SocketAddr,
}
impl UdpTpuConnection {
pub fn new_from_addr(local_socket: Arc<UdpSocket>, tpu_addr: SocketAddr) -> Self {
Self {
socket: local_socket,
addr: tpu_addr,
}
}
pub fn new(
local_socket: Arc<UdpSocket>,
tpu_addr: SocketAddr,
_connection_stats: Arc<ConnectionCacheStats>,
) -> Self {
Self::new_from_addr(local_socket, tpu_addr)
}
}
impl TpuConnection for UdpTpuConnection {
fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
fn send_wire_transaction_async(&self, wire_transaction: Vec<u8>) -> TransportResult<()> {
self.socket.send_to(wire_transaction.as_ref(), self.addr)?;
Ok(())
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]> + Send + Sync,
{
let pkts: Vec<_> = buffers.iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts)?;
Ok(())
}
fn send_wire_transaction_batch_async(&self, buffers: Vec<Vec<u8>>) -> TransportResult<()> {
let pkts: Vec<_> = buffers.into_iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts)?;
Ok(())
}
}
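// Editor's note: a hedged usage sketch (not part of this commit) of the
// blocking connection above; the bind and TPU addresses are illustrative.
fn example_blocking_send(wire_transaction: Vec<u8>) -> TransportResult<()> {
    let socket = Arc::new(UdpSocket::bind("0.0.0.0:0")?);
    let tpu_addr: SocketAddr = "127.0.0.1:8003".parse().unwrap();
    let conn = UdpTpuConnection::new_from_addr(socket, tpu_addr);
    conn.send_wire_transaction_async(wire_transaction)
}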

View File

@ -70,7 +70,7 @@ use {
self, MAX_BATCH_SEND_RATE_MS, MAX_TRANSACTION_BATCH_SIZE,
},
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::connection_cache::{
solana_tpu_client::tpu_connection_cache::{
DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP,
},
solana_validator::{