add MAX_RECENT_BLOCKHASHES and some logs (#49)

adding identity to the tpu client, connection cache

some refactoring in block_subscription

block polling refactoring and rework of the test scripts

more refactoring after rebase

some more refactoring of block listener and tpu manager

limiting the maximum number of parallel sends to 5

minor changes, cargo fmt, changing default value

waiting some time before adding back an errored block
Authored by Aniket Prajapati on 2023-02-06 20:45:26 +05:30, committed by Godmode Galactus
parent c2bcf6d126
commit 78cbafe0c2
12 changed files with 4850 additions and 592 deletions


@@ -23,7 +23,12 @@ function delay(ms: number) {
 export async function main() {
-    const connection = new Connection(url, 'confirmed');
+    const connection = new Connection(url, 'finalized');
+    console.log('get latest blockhash')
+    const blockhash = await connection.getLatestBlockhash({
+        commitment: 'finalized'
+    });
+    console.log('blockhash : ' + blockhash.blockhash);
     const authority = Keypair.fromSecretKey(
         Uint8Array.from(
             JSON.parse(
@@ -35,14 +40,18 @@ export async function main() {
     const users = InFile.users.map(x => Keypair.fromSecretKey(Uint8Array.from(x.secretKey)));
     const userAccounts = InFile.tokenAccounts.map(x => new PublicKey(x));
-    let signatures_to_unpack: TransactionSignature[][] = [];
+    let signatures_to_unpack: TransactionSignature[][] = new Array<TransactionSignature[]>(forSeconds);
     let time_taken_to_send = [];
     for (let i = 0; i < forSeconds; ++i) {
         console.log('Sending transaction ' + i);
         const start = performance.now();
-        let signatures: TransactionSignature[] = [];
-        const blockhash = (await connection.getLatestBlockhash()).blockhash;
+        signatures_to_unpack[i] = new Array<TransactionSignature>(tps);
+        let blockhash = (await connection.getLatestBlockhash()).blockhash;
         for (let j = 0; j < tps; ++j) {
+            if (j%100 == 0) {
+                blockhash = (await connection.getLatestBlockhash()).blockhash;
+            }
             const toIndex = Math.floor(Math.random() * users.length);
             let fromIndex = toIndex;
             while (fromIndex === toIndex) {
@@ -50,18 +59,14 @@ export async function main() {
             }
             const userFrom = userAccounts[fromIndex];
             const userTo = userAccounts[toIndex];
-            if (skip_confirmations === false) {
-                const transaction = new Transaction().add(
-                    splToken.createTransferInstruction(userFrom, userTo, users[fromIndex].publicKey, Math.ceil(Math.random() * 100))
-                );
-                transaction.recentBlockhash = blockhash;
-                transaction.feePayer = authority.publicKey;
-                const p = connection.sendTransaction(transaction, [authority, users[fromIndex]], { skipPreflight: true });
-                signatures.push(await p)
-            }
-        }
-        if (skip_confirmations === false) {
-            signatures_to_unpack.push(signatures)
+            const transaction = new Transaction().add(
+                splToken.createTransferInstruction(userFrom, userTo, users[fromIndex].publicKey, Math.ceil((Math.random()+1) * 100))
+            );
+            transaction.recentBlockhash = blockhash;
+            transaction.feePayer = authority.publicKey;
+            connection.sendTransaction(transaction, [authority, users[fromIndex]], { skipPreflight: true }).then(p => {signatures_to_unpack[i][j] = p});
         }
         const end = performance.now();
         const diff = (end - start);
@@ -72,7 +77,7 @@ export async function main() {
     }
     console.log('finish sending transactions');
-    await delay(5000)
+    await delay(10000)
console.log('checking for confirmations');
if (skip_confirmations === false) {
const size = signatures_to_unpack.length


@@ -1,79 +1,85 @@
-import { Connection, Keypair } from '@solana/web3.js';
+import { Connection, Keypair, LAMPORTS_PER_SOL, PublicKey } from '@solana/web3.js';
 import * as fs from 'fs';
 import * as splToken from "@solana/spl-token";
 import * as os from 'os';
 // number of users
-const nbUsers = process.argv[2];
+const nbUsers = +process.argv[2];
 // url
 const url = process.argv.length > 3 ? process.argv[3] : "http://0.0.0.0:8899";
 // outfile
 const outFile = process.argv.length > 4 ? process.argv[4] : "out.json";
 console.log("creating " + nbUsers + " Users on " + url + " out file " + outFile);
-(async () => {
+function delay(ms: number) {
+    return new Promise( resolve => setTimeout(resolve, ms) );
+}
+export async function main() {
     const connection = new Connection(url, 'confirmed');
-    const authority = Keypair.fromSecretKey(
+    let authority = Keypair.fromSecretKey(
         Uint8Array.from(
-        JSON.parse(
-            process.env.KEYPAIR ||
+            JSON.parse(
+                process.env.KEYPAIR ||
                 fs.readFileSync(os.homedir() + '/.config/solana/id.json', 'utf-8'),
-        ),
-        ),
+            ),
+        ),
     );
     // create n key pairs
-    const userKps = [...Array(nbUsers)].map(_x => Keypair.generate())
-    // create and initialize new mint
-    const mint = await splToken.createMint(
+    let userKps = [...Array(nbUsers)].map(_x => Keypair.generate())
+    let mint = await splToken.createMint(
         connection,
         authority,
         authority.publicKey,
         null,
         6,
     );
     // create accounts for each key pair created earlier
-    const accounts = await Promise.all(userKps.map(x => {
-        return splToken.createAccount(
+    let accounts : PublicKey[] = [];
+    for (const user of userKps) {
+        console.log("account created");
+        let account = await splToken.createAccount(
             connection,
             authority,
             mint,
-            x.publicKey,
+            user.publicKey,
         )
-    }));
+        accounts.push(account)
+        await delay(100)
+    };
     // mint to accounts
-    await Promise.all(accounts.map(to => {
-        return splToken.mintTo(
+    for (const account of accounts) {
+        console.log("account minted");
+        await splToken.mintTo(
             connection,
             authority,
             mint,
-            to,
+            account,
             authority,
             1_000_000_000_000,
         )
-    }));
+        await delay(100)
+    };
-    const users = userKps.map(user => {
-        return {
-            publicKey: user.publicKey.toBase58(),
-            secretKey: Array.from(user.secretKey)
-        }
+    const users = userKps.map(x => {
+        const info = {
+            'publicKey' : x.publicKey.toBase58(),
+            'secretKey' : Array.from(x.secretKey)
+        };
+        return info;
     });
     const data = {
-        'users': users,
-        'tokenAccounts': accounts,
-        'mint': mint,
-        'minted_amount': 1_000_000_000_000
+        'users' : users,
+        'tokenAccounts' : accounts,
+        'mint' : mint,
+        'minted_amount' : 1_000_000_000_000
     };
     console.log('created ' + nbUsers + ' Users and minted 10^12 tokens for mint ' + mint);
     fs.writeFileSync(outFile, JSON.stringify(data));
+}
-})()
+main().then(x => {
+    console.log('finished sucessfully')
+}).catch(e => {
+    console.log('caught an error : ' + e)
+})

package-lock.json (generated, 4214 lines): diff suppressed because it is too large.


@@ -2,7 +2,6 @@ use std::sync::Arc;
 use dashmap::DashMap;
-use log::info;
 use solana_rpc_client::nonblocking::rpc_client::RpcClient;
 use solana_sdk::commitment_config::CommitmentConfig;
 use tokio::sync::RwLock;
@@ -12,8 +11,8 @@ use crate::workers::BlockInformation;
 #[derive(Clone)]
 pub struct BlockStore {
     blocks: Arc<DashMap<String, BlockInformation>>,
-    latest_confirmed_blockhash: Arc<RwLock<String>>,
-    latest_finalized_blockhash: Arc<RwLock<String>>,
+    latest_confirmed_blockinfo: Arc<RwLock<BlockInformation>>,
+    latest_finalized_blockinfo: Arc<RwLock<BlockInformation>>,
 }
impl BlockStore {
@@ -24,8 +23,8 @@ impl BlockStore {
             Self::fetch_latest(rpc_client, CommitmentConfig::finalized()).await?;
         Ok(Self {
-            latest_confirmed_blockhash: Arc::new(RwLock::new(confirmed_blockhash.clone())),
-            latest_finalized_blockhash: Arc::new(RwLock::new(finalized_blockhash.clone())),
+            latest_confirmed_blockinfo: Arc::new(RwLock::new(confirmed_block.clone())),
+            latest_finalized_blockinfo: Arc::new(RwLock::new(finalized_block.clone())),
blocks: Arc::new({
let map = DashMap::new();
map.insert(confirmed_blockhash, confirmed_block);
@@ -48,7 +47,14 @@
             .get_slot_with_commitment(commitment_config)
             .await?;
-        Ok((latest_block_hash, BlockInformation { slot, block_height }))
+        Ok((
+            latest_block_hash.clone(),
+            BlockInformation {
+                slot,
+                block_height,
+                blockhash: latest_block_hash,
+            },
+        ))
}
pub async fn get_block_info(&self, blockhash: &str) -> Option<BlockInformation> {
@@ -59,48 +65,48 @@
         Some(info.value().to_owned())
     }
-    pub fn get_latest_blockhash(&self, commitment_config: CommitmentConfig) -> Arc<RwLock<String>> {
+    // private
+    fn get_latest_blockinfo_lock(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Arc<RwLock<BlockInformation>> {
         if commitment_config.is_finalized() {
-            self.latest_finalized_blockhash.clone()
+            self.latest_finalized_blockinfo.clone()
         } else {
-            self.latest_confirmed_blockhash.clone()
+            self.latest_confirmed_blockinfo.clone()
         }
     }
     pub async fn get_latest_block_info(
         &self,
         commitment_config: CommitmentConfig,
-    ) -> (String, BlockInformation) {
-        let blockhash = self
-            .get_latest_blockhash(commitment_config)
+    ) -> BlockInformation {
+        let block_info = self
+            .get_latest_blockinfo_lock(commitment_config)
             .read()
             .await
-            .to_owned();
+            .clone();
-        let block_info = self
-            .blocks
-            .get(&blockhash)
-            .expect("Race Condition: Latest block not in block store")
-            .value()
-            .to_owned();
-        (blockhash, block_info)
+        block_info
     }
     pub async fn add_block(
         &self,
-        blockhash: String,
         block_info: BlockInformation,
         commitment_config: CommitmentConfig,
     ) {
-        info!("ab {blockhash} {block_info:?}");
+        let blockhash = block_info.blockhash.clone();
         // Write to block store first in order to prevent
         // any race condition i.e prevent some one to
         // ask the map what it doesn't have rn
-        let slot = block_info.slot;
-        self.blocks.insert(blockhash.clone(), block_info);
-        if slot > self.get_latest_block_info(commitment_config).await.1.slot {
-            *self.get_latest_blockhash(commitment_config).write().await = blockhash;
+        self.blocks.insert(blockhash, block_info.clone());
+        let last_recent_block = self.get_latest_block_info(commitment_config).await;
+        if last_recent_block.slot < block_info.slot {
+            *self
+                .get_latest_blockinfo_lock(commitment_config)
+                .write()
+                .await = block_info;
         }
     }
 }
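
The reworked store folds the blockhash into BlockInformation itself, so get_latest_block_info no longer returns a (String, BlockInformation) tuple or consults the map a second time. A minimal caller sketch (the function and variable names here are illustrative, not part of the commit):

use solana_sdk::commitment_config::CommitmentConfig;

// Hypothetical caller of the BlockStore defined above.
async fn log_latest(store: &BlockStore) {
    let info = store.get_latest_block_info(CommitmentConfig::confirmed()).await;
    // slot, block_height and blockhash now travel together in one struct
    println!("{} at slot {} (height {})", info.blockhash, info.slot, info.block_height);
}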


@@ -24,7 +24,6 @@ use solana_rpc_client_api::{
     config::{RpcContextConfig, RpcRequestAirdropConfig, RpcSignatureStatusConfig},
     response::{Response as RpcResponse, RpcBlockhash, RpcResponseContext, RpcVersionInfo},
 };
-use solana_sdk::clock::MAX_RECENT_BLOCKHASHES;
use solana_sdk::{
commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey, signature::Keypair,
transaction::VersionedTransaction,
@@ -249,7 +248,11 @@ impl LiteRpcServer for LiteBridge {
             .map(|config| config.commitment.unwrap_or_default())
             .unwrap_or_default();
-        let (blockhash, BlockInformation { slot, block_height }) = self
+        let BlockInformation {
+            slot,
+            block_height,
+            blockhash,
+        } = self
.block_store
.get_latest_block_info(commitment_config)
.await;
@@ -263,7 +266,7 @@
             },
             value: RpcBlockhash {
                 blockhash,
-                last_valid_block_height: block_height + (MAX_RECENT_BLOCKHASHES as u64),
+                last_valid_block_height: block_height + 150,
},
})
}
@@ -300,7 +303,6 @@
             .block_store
             .get_latest_block_info(commitment)
             .await
-            .1
.slot;
Ok(RpcResponse {
@@ -335,7 +337,6 @@
                 .block_store
                 .get_latest_block_info(CommitmentConfig::finalized())
                 .await
-                .1
.slot,
api_version: None,
},
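
On the literal 150 above: in solana-sdk, MAX_RECENT_BLOCKHASHES is 300, while a blockhash is only accepted for 150 blocks (MAX_PROCESSING_AGE), so the hard-coded value corresponds to MAX_PROCESSING_AGE rather than the removed constant. A sketch of the equivalent expression, assuming that intent:

use solana_sdk::clock::MAX_PROCESSING_AGE;

// Same arithmetic as `block_height + 150` in the hunk above.
fn last_valid_block_height(block_height: u64) -> u64 {
    block_height + MAX_PROCESSING_AGE as u64 // MAX_PROCESSING_AGE == 150
}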


@@ -20,7 +20,7 @@ pub const DEFAULT_WS_ADDR: &str = "ws://0.0.0.0:8900";
 #[from_env]
 pub const DEFAULT_TX_MAX_RETRIES: u16 = 1;
 #[from_env]
-pub const DEFAULT_TX_BATCH_SIZE: usize = 1 << 7;
+pub const DEFAULT_TX_BATCH_SIZE: usize = 128;
#[from_env]
pub const DEFAULT_FANOUT_SIZE: u64 = 32;
#[from_env]


@@ -26,7 +26,8 @@ const TPU_CONNECTION_CACHE_SIZE: usize = 8;
 pub struct TpuManager {
     error_count: Arc<AtomicU32>,
     rpc_client: Arc<RpcClient>,
-    tpu_client: Arc<RwLock<QuicTpuClient>>,
+    // why arc twice / one is so that we clone rwlock and other so that we can clone tpu client
+    tpu_client: Arc<RwLock<Arc<QuicTpuClient>>>,
pub ws_addr: String,
fanout_slots: u64,
connection_cache: Arc<QuicConnectionCache>,
@@ -54,7 +55,7 @@ impl TpuManager {
             connection_cache.clone(),
         )
         .await?;
-        let tpu_client = Arc::new(RwLock::new(tpu_client));
+        let tpu_client = Arc::new(RwLock::new(Arc::new(tpu_client)));
Ok(Self {
rpc_client,
@@ -93,21 +94,23 @@ impl TpuManager {
             )
             .await?;
             self.error_count.store(0, Ordering::Relaxed);
-            *self.tpu_client.write().await = tpu_client;
+            *self.tpu_client.write().await = Arc::new(tpu_client);
             info!("TPU Reset after 5 errors");
         }
         Ok(())
     }
+    async fn get_tpu_client(&self) -> Arc<QuicTpuClient> {
+        self.tpu_client.read().await.clone()
+    }
     pub async fn try_send_wire_transaction_batch(
         &self,
         wire_transactions: Vec<Vec<u8>>,
     ) -> anyhow::Result<()> {
-        match self
-            .tpu_client
-            .read()
-            .await
+        let tpu_client = self.get_tpu_client().await;
+        match tpu_client
             .try_send_wire_transaction_batch(wire_transactions)
             .await
         {
@@ -120,6 +123,7 @@
     }
     pub async fn estimated_current_slot(&self) -> u64 {
-        self.tpu_client.read().await.estimated_current_slot()
+        let tpu_client = self.get_tpu_client().await;
+        tpu_client.estimated_current_slot()
}
}
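
The in-code comment explains the nested Arc: the outer Arc shares one RwLock between tasks, while the inner Arc lets a caller clone the client out and drop the read lock before doing slow network I/O, so a concurrent reset can still take the write lock. A minimal sketch of the pattern with a stand-in type (not the real QuicTpuClient API):

use std::sync::Arc;
use tokio::sync::RwLock;

struct Client; // stand-in for QuicTpuClient
impl Client {
    async fn send(&self) { /* long-running network I/O */ }
}

async fn send_without_holding_lock(shared: &Arc<RwLock<Arc<Client>>>) {
    // the read lock is held only long enough to clone the inner Arc ...
    let client = shared.read().await.clone();
    // ... so the send itself runs lock-free
    client.send().await;
}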


@@ -1,11 +1,11 @@
-use std::sync::{
-    atomic::{AtomicU64, Ordering},
-    Arc,
+use std::{
+    collections::{BTreeSet, VecDeque},
+    sync::Arc,
 };
 use dashmap::DashMap;
 use jsonrpsee::SubscriptionSink;
-use log::{error, info, warn};
+use log::{info, warn};
use prometheus::{histogram_opts, opts, register_counter, register_histogram, Counter, Histogram};
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
@@ -20,11 +20,14 @@ use solana_sdk::{
 };
 use solana_transaction_status::{
-    option_serializer::OptionSerializer, EncodedTransaction, RewardType,
-    TransactionConfirmationStatus, TransactionDetails, TransactionStatus, UiConfirmedBlock,
-    UiTransactionEncoding, UiTransactionStatusMeta,
+    option_serializer::OptionSerializer, RewardType, TransactionConfirmationStatus,
+    TransactionDetails, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding,
+    UiTransactionStatusMeta,
 };
+use tokio::{
+    sync::{mpsc::Sender, Mutex},
+    task::JoinHandle,
+};
-use tokio::{sync::mpsc::Sender, task::JoinHandle};
use crate::{
block_store::BlockStore,
@@ -72,6 +75,7 @@ pub struct BlockListener {
 pub struct BlockInformation {
     pub slot: u64,
     pub block_height: u64,
+    pub blockhash: String,
}
pub struct BlockListnerNotificatons {
@@ -105,8 +109,7 @@ impl BlockListener {
         commitment_config: CommitmentConfig,
         sink: SubscriptionSink,
     ) {
-        let _ = self
-            .signature_subscribers
+        self.signature_subscribers
.insert((signature, commitment_config), sink);
}
@@ -129,6 +132,7 @@
         commitment_config: CommitmentConfig,
         postgres: Option<PostgresMpscSend>,
     ) -> anyhow::Result<()> {
+        //info!("indexing slot {} commitment {}", slot, commitment_config.commitment);
let comfirmation_status = match commitment_config.commitment {
CommitmentLevel::Finalized => TransactionConfirmationStatus::Finalized,
_ => TransactionConfirmationStatus::Confirmed,
@@ -148,8 +152,8 @@
                     transaction_details: Some(TransactionDetails::Full),
                     commitment: Some(commitment_config),
                     max_supported_transaction_version: Some(0),
-                    encoding: Some(UiTransactionEncoding::JsonParsed),
-                    ..Default::default()
+                    encoding: Some(UiTransactionEncoding::Binary),
+                    rewards: Some(false),
},
)
.await?;
@@ -157,6 +161,7 @@
         timer.observe_duration();
         if commitment_config.is_finalized() {
+            info!("finalized slot {}", slot);
             FIN_BLOCKS_RECV.inc();
} else {
CON_BLOCKS_RECV.inc();
@@ -177,8 +182,11 @@
         self.block_store
             .add_block(
-                blockhash.clone(),
-                BlockInformation { slot, block_height },
+                BlockInformation {
+                    slot,
+                    block_height,
+                    blockhash: blockhash.clone(),
+                },
commitment_config,
)
.await;
@@ -211,13 +219,14 @@
                 continue;
             };
-            let sig = match tx.transaction {
-                EncodedTransaction::Json(json) => json.signatures[0].to_string(),
-                _ => {
-                    error!("Expected jsonParsed encoded tx");
+            let tx = match tx.transaction.decode() {
+                Some(tx) => tx,
+                None => {
+                    warn!("transaction could not be decoded");
                     continue;
                 }
             };
+            let sig = tx.signatures[0].to_string();
if let Some(mut tx_status) = self.tx_sender.txs_sent.get_mut(&sig) {
//
@@ -283,23 +292,34 @@
         commitment_config: CommitmentConfig,
         postgres: Option<PostgresMpscSend>,
     ) -> JoinHandle<anyhow::Result<()>> {
-        let (send, recv) = flume::unbounded();
-        let get_block_errors = Arc::new(AtomicU64::new(0));
+        let slots_task_queue = Arc::new(Mutex::new(VecDeque::<(u64, u8)>::new()));
+        // task to fetch blocks
         for _i in 0..6 {
             let this = self.clone();
             let postgres = postgres.clone();
-            let recv = recv.clone();
-            let send = send.clone();
-            let get_block_errors = get_block_errors.clone();
+            let slots_task_queue = slots_task_queue.clone();
             tokio::spawn(async move {
-                while let Ok(slot) = recv.recv_async().await {
-                    if get_block_errors.load(Ordering::Relaxed) > 6 {
-                        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
-                        get_block_errors.fetch_sub(1, Ordering::Relaxed);
+                let slots_task_queue = slots_task_queue.clone();
+                loop {
+                    let (slot, error_count) = {
+                        let mut queue = slots_task_queue.lock().await;
+                        match queue.pop_front() {
+                            Some(t) => t,
+                            None => {
+                                tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+                                continue;
+                            }
+                        }
+                    };
+                    if error_count > 10 {
+                        warn!(
+                            "unable to get block at slot {} and commitment {}",
+                            slot, commitment_config.commitment
+                        );
                         continue;
                     }
                     // println!("{i} thread in slot {slot}");
                     if let Err(err) = this
                         .index_slot(slot, commitment_config, postgres.clone())
@@ -308,76 +328,67 @@
                         warn!(
                             "Error while indexing {commitment_config:?} block with slot {slot} {err}"
                         );
-                        get_block_errors.fetch_add(1, Ordering::Relaxed);
-                        send.send_async(slot).await.unwrap();
-                        tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
+                        {
+                            let mut queue = slots_task_queue.lock().await;
+                            queue.push_back((slot, error_count + 1));
+                        }
                     };
                     // println!("{i} thread done slot {slot}");
                 }
             });
         }
-        let (latest_slot_send, mut latest_slot_recv) = tokio::sync::mpsc::channel(1);
-        {
-            let this = self.clone();
-            let latest_slot_send = latest_slot_send.clone();
-            tokio::spawn(async move {
-                while let Some(latest_slot) = latest_slot_recv.recv().await {
-                    if let Err(err) = this
-                        .index_slot(latest_slot, commitment_config, postgres.clone())
-                        .await
-                    {
-                        warn!(
-                            "Error while indexing latest {commitment_config:?} block with slot {latest_slot} {err}"
-                        );
-                        get_block_errors.fetch_add(1, Ordering::Relaxed);
-                        latest_slot_send.send(latest_slot).await.unwrap();
-                    };
-                }
-            });
-        }
+        let rpc_client = self.rpc_client.clone();
         tokio::spawn(async move {
-            let mut slot = self
+            let slots_task_queue = slots_task_queue.clone();
+            let last_latest_slot = self
                 .block_store
                 .get_latest_block_info(commitment_config)
                 .await
-                .1
                 .slot;
+            // -5 for warmup
+            let mut last_latest_slot = last_latest_slot - 5;
+            // storage for recent slots processed
+            const SLOT_PROCESSED_SIZE: usize = 128;
+            let mut slot_processed = BTreeSet::<u64>::new();
+            let rpc_client = rpc_client.clone();
             loop {
-                info!("{commitment_config:?} {slot}");
-                let mut new_block_slots = self
-                    .rpc_client
-                    .get_blocks_with_commitment(slot, None, commitment_config)
+                let new_slot = rpc_client
+                    .get_slot_with_commitment(commitment_config)
                     .await?;
+                // filter already processed slots
+                let new_block_slots: Vec<u64> = (last_latest_slot..new_slot)
+                    .filter(|x| !slot_processed.contains(x))
+                    .map(|x| x)
+                    .collect();
                 if new_block_slots.is_empty() {
-                    tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
+                    tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
+                    println!("no slots");
                     continue;
                 }
+                //info!("Received new slots {commitment_config:?} {last_latest_slot}");
-                info!("Received new slots");
+                let latest_slot = *new_block_slots.last().unwrap();
-                let Some(latest_slot) = new_block_slots.pop() else {
-                    warn!("Didn't receive any block slots for {slot}");
-                    continue;
-                };
-                slot = latest_slot;
-                latest_slot_send.send(latest_slot).await?;
-                for slot in new_block_slots {
-                    send.send_async(slot).await?;
+                // context for lock
+                {
+                    let mut lock = slots_task_queue.lock().await;
+                    for slot in new_block_slots {
+                        lock.push_back((slot, 0));
+                        if slot_processed.insert(slot) && slot_processed.len() > SLOT_PROCESSED_SIZE
+                        {
+                            slot_processed.pop_first();
+                        }
+                    }
                 }
+                last_latest_slot = latest_slot;
+                tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
}
})
}
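
The new dispatcher replaces the flume channel and the global error counter with a shared VecDeque of (slot, error_count) pairs: workers pop slots, and a slot whose indexing fails is pushed back with its counter bumped until it exceeds 10, which is the "waiting some time before adding back an errored block" behavior from the commit message. A condensed sketch of that retry queue (thresholds mirror the diff; index_slot is a placeholder):

use std::collections::VecDeque;
use std::sync::Arc;
use tokio::sync::Mutex;

type SlotQueue = Arc<Mutex<VecDeque<(u64, u8)>>>;

async fn index_slot(_slot: u64) -> Result<(), ()> {
    Ok(()) // placeholder for the real block indexing
}

async fn worker(queue: SlotQueue) {
    loop {
        let front = queue.lock().await.pop_front();
        let Some((slot, error_count)) = front else {
            // queue drained: back off briefly instead of busy-spinning
            tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
            continue;
        };
        if error_count > 10 {
            continue; // give up on this slot after too many failures
        }
        if index_slot(slot).await.is_err() {
            // re-enqueue with a bumped error count; it will be retried later
            queue.lock().await.push_back((slot, error_count + 1));
        }
    }
}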


@@ -10,7 +10,7 @@ use log::{info, warn};
 use prometheus::{register_counter, Counter};
 use solana_transaction_status::TransactionStatus;
 use tokio::{
-    sync::mpsc::{error::TryRecvError, UnboundedReceiver},
+    sync::{mpsc::UnboundedReceiver, TryAcquireError},
task::JoinHandle,
};
@@ -35,6 +35,8 @@ pub struct TxSender {
     pub txs_sent: Arc<DashMap<String, TxProps>>,
     /// TpuClient to call the tpu port
     pub tpu_manager: Arc<TpuManager>,
+    counting_semaphore: Arc<tokio::sync::Semaphore>,
}
/// Transaction Properties
@@ -58,62 +60,10 @@ impl TxSender {
         Self {
             tpu_manager,
             txs_sent: Default::default(),
+            counting_semaphore: Arc::new(tokio::sync::Semaphore::new(5)),
         }
     }
-    /// retry enqued_tx(s)
-    async fn forward_txs(
-        &self,
-        sigs_and_slots: Vec<(String, u64)>,
-        txs: Vec<WireTransaction>,
-        postgres: Option<PostgresMpscSend>,
-    ) {
-        assert_eq!(sigs_and_slots.len(), txs.len());
-        if sigs_and_slots.is_empty() {
-            return;
-        }
-        let tpu_client = self.tpu_manager.clone();
-        let txs_sent = self.txs_sent.clone();
-        tokio::spawn(async move {
-            let quic_response = match tpu_client.try_send_wire_transaction_batch(txs).await {
-                Ok(_) => {
-                    for (sig, _) in &sigs_and_slots {
-                        txs_sent.insert(sig.to_owned(), TxProps::default());
-                    }
-                    // metrics
-                    TXS_SENT.inc_by(sigs_and_slots.len() as f64);
-                    1
-                }
-                Err(err) => {
-                    warn!("{err}");
-                    0
-                }
-            };
-            if let Some(postgres) = postgres {
-                let forwarded_slot: u64 = tpu_client.estimated_current_slot().await;
-                for (sig, recent_slot) in sigs_and_slots {
-                    postgres
-                        .send(PostgresMsg::PostgresTx(PostgresTx {
-                            signature: sig.clone(),
-                            recent_slot: recent_slot as i64,
-                            forwarded_slot: forwarded_slot as i64,
-                            processed_slot: None,
-                            cu_consumed: None,
-                            cu_requested: None,
-                            quic_response,
-                        }))
-                        .expect("Error writing to postgres service");
-                }
-            }
-        });
-    }
/// retry and confirm transactions every 2ms (avg time to confirm tx)
pub fn execute(
self,
@@ -124,33 +74,105 @@ impl TxSender {
     ) -> JoinHandle<anyhow::Result<()>> {
         tokio::spawn(async move {
             info!(
-                "Batching tx(s) with batch size of {tx_batch_size} every {}ms",
+                "Batching tx(s) with batch size of {tx_batch_size} every {} ms",
                 tx_send_interval.as_millis()
             );
             loop {
                 let mut sigs_and_slots = Vec::with_capacity(tx_batch_size);
                 let mut txs = Vec::with_capacity(tx_batch_size);
+                let mut maybe_permit = None;
+                let counting_semaphore = self.counting_semaphore.clone();
                 while txs.len() <= tx_batch_size {
-                    match recv.try_recv() {
-                        Ok((sig, tx, slot)) => {
-                            sigs_and_slots.push((sig, slot));
-                            txs.push(tx);
-                        }
-                        Err(TryRecvError::Disconnected) => {
-                            bail!("Channel Disconnected");
-                        }
+                    let res = tokio::time::timeout(tx_send_interval, recv.recv()).await;
+                    match res {
+                        Ok(value) => match value {
+                            Some((sig, tx, slot)) => {
+                                sigs_and_slots.push((sig, slot));
+                                txs.push(tx);
+                            }
+                            None => {
+                                bail!("Channel Disconnected");
+                            }
+                        },
                         _ => {
-                            break;
+                            let res = counting_semaphore.clone().try_acquire_owned();
+                            match res {
+                                Ok(permit) => {
+                                    maybe_permit = Some(permit);
+                                    break;
+                                }
+                                Err(TryAcquireError::Closed) => {
+                                    bail!("Semaphone permit error");
+                                }
+                                Err(TryAcquireError::NoPermits) => {
+                                    // No permits continue to fetch transactions and batch them
+                                }
+                            }
                         }
                     }
                 }
                 assert_eq!(sigs_and_slots.len(), txs.len());
-                self.forward_txs(sigs_and_slots, txs, postgres_send.clone())
-                    .await;
+                if sigs_and_slots.is_empty() {
+                    continue;
+                }
-                tokio::time::sleep(tx_send_interval).await;
+                let permit = match maybe_permit {
+                    Some(permit) => permit,
+                    None => {
+                        // get the permit
+                        counting_semaphore.acquire_owned().await.unwrap()
+                    }
+                };
+                let postgres_send = postgres_send.clone();
+                let tpu_client = self.tpu_manager.clone();
+                let txs_sent = self.txs_sent.clone();
+                tokio::spawn(async move {
+                    let semaphore_permit = permit;
+                    for (sig, _) in &sigs_and_slots {
+                        txs_sent.insert(sig.to_owned(), TxProps::default());
+                    }
+                    info!(
+                        "sending {} transactions by tpu size {}",
+                        txs.len(),
+                        txs_sent.len()
+                    );
+                    let quic_response = {
+                        let _semaphore_permit = semaphore_permit;
+                        match tpu_client.try_send_wire_transaction_batch(txs).await {
+                            Ok(_) => {
+                                // metrics
+                                TXS_SENT.inc_by(sigs_and_slots.len() as f64);
+                                1
+                            }
+                            Err(err) => {
+                                warn!("{err}");
+                                0
+                            }
+                        }
+                    };
+                    if let Some(postgres) = postgres_send {
+                        let forwarded_slot: u64 = tpu_client.estimated_current_slot().await;
+                        for (sig, recent_slot) in sigs_and_slots {
+                            postgres
+                                .send(PostgresMsg::PostgresTx(PostgresTx {
+                                    signature: sig.clone(),
+                                    recent_slot: recent_slot as i64,
+                                    forwarded_slot: forwarded_slot as i64,
+                                    processed_slot: None,
+                                    cu_consumed: None,
+                                    cu_requested: None,
+                                    quic_response,
+                                }))
+                                .expect("Error writing to postgres service");
+                        }
+                    }
+                });
}
})
}
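
The counting semaphore is what enforces the "at most 5 parallel sends" from the commit message: each spawned batch-send task owns one permit and releases it when the send finishes, so batch building continues while up to five sends are in flight. A reduced sketch of the pattern (batch assembly and postgres reporting omitted; send_one is a placeholder):

use std::sync::Arc;
use tokio::sync::Semaphore;

type WireTransaction = Vec<u8>;

async fn send_one(_batch: Vec<WireTransaction>) { /* wire-transaction send goes here */ }

async fn send_batches(batches: Vec<Vec<WireTransaction>>) {
    let semaphore = Arc::new(Semaphore::new(5)); // at most 5 in-flight sends
    for batch in batches {
        // acquire_owned moves a permit into the task; dropping it releases the slot
        let permit = semaphore.clone().acquire_owned().await.unwrap();
        tokio::spawn(async move {
            let _permit = permit; // held for the lifetime of the send
            send_one(batch).await;
        });
    }
}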


@@ -1,4 +1,3 @@
use lite_rpc::DEFAULT_LITE_RPC_ADDR;
use solana_rpc_client::nonblocking::rpc_client::RpcClient;
@@ -8,8 +7,8 @@ async fn blockhash() -> anyhow::Result<()> {
     let mut prev_blockhash = lite_rpc.get_latest_blockhash().await.unwrap();
-    for _ in 0..5 {
-        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
+    for _ in 0..60 {
+        tokio::time::sleep(tokio::time::Duration::from_millis(3000)).await;
let blockhash = lite_rpc.get_latest_blockhash().await.unwrap();


@@ -35,7 +35,7 @@
     // "types": [], /* Specify type package names to be included without being referenced in a source file. */
     // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
     // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
-    // "resolveJsonModule": true, /* Enable importing .json files. */
+    "resolveJsonModule": true, /* Enable importing .json files. */
// "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
/* JavaScript Support */

yarn.lock (686 lines): diff suppressed because it is too large.