nit: always pass &Arc<Bank>, clone() only where consumed

Rob Walker 2019-02-27 10:23:27 -08:00 committed by Grimes
parent b6ccb475f1
commit 9adbc1dd60
7 changed files with 16 additions and 14 deletions
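
The rule the commit applies is standard Rust practice: take a borrowed &Arc<Bank> at the API boundary and call clone() (which only bumps the reference count) at the single point where an owned handle is actually consumed, for example when it is moved into a spawned thread or stored on a struct. Below is a minimal, self-contained sketch of that pattern; Bank, ConfirmationService and RequestProcessor are simplified stand-ins chosen for illustration, not the crate's real types.

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;

// Simplified stand-in for the crate's Bank; fields elided.
struct Bank;

struct ConfirmationService {
    thread_hdl: JoinHandle<()>,
}

impl ConfirmationService {
    /// Borrow the Arc at the API boundary; callers keep their own handle.
    fn new(bank: &Arc<Bank>, exit: Arc<AtomicBool>) -> Self {
        // Clone exactly where an owned handle is consumed: it is moved
        // into the worker thread below.
        let bank = bank.clone();
        let thread_hdl = Builder::new()
            .name("confirmation".to_string())
            .spawn(move || {
                while !exit.load(Ordering::Relaxed) {
                    let _ = &bank; // ... compute confirmation against `bank` ...
                    thread::sleep(Duration::from_millis(100));
                }
            })
            .unwrap();
        Self { thread_hdl }
    }

    fn join(self) {
        self.thread_hdl.join().unwrap();
    }
}

struct RequestProcessor {
    bank: Option<Arc<Bank>>,
}

impl RequestProcessor {
    /// Same rule when the Arc is stored rather than moved into a thread:
    /// the clone happens inside, at the point of consumption.
    fn set_bank(&mut self, bank: &Arc<Bank>) {
        self.bank = Some(bank.clone());
    }
}

fn main() {
    let bank = Arc::new(Bank);
    let exit = Arc::new(AtomicBool::new(false));

    let service = ConfirmationService::new(&bank, exit.clone());

    let mut processor = RequestProcessor { bank: None };
    processor.set_bank(&bank); // the caller still owns `bank` afterwards

    exit.store(true, Ordering::Relaxed);
    service.join();
}

The caller-facing benefit is that code which only needs to read the bank never pays an extra reference-count bump, and the signatures themselves document which services actually keep a handle alive.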

View File

@@ -71,7 +71,7 @@ impl BankingStage {
// Single thread to compute confirmation
let leader_confirmation_service =
- LeaderConfirmationService::new(bank.clone(), leader_id, exit.clone());
+ LeaderConfirmationService::new(&bank, leader_id, exit.clone());
// Many banks that process transactions in parallel.
let bank_thread_hdls: Vec<JoinHandle<UnprocessedPackets>> = (0..Self::num_threads())

View File

@@ -234,7 +234,7 @@ impl BroadcastService {
#[allow(clippy::too_many_arguments)]
pub fn new(
slot_height: u64,
- bank: Arc<Bank>,
+ bank: &Arc<Bank>,
sock: UdpSocket,
cluster_info: Arc<RwLock<ClusterInfo>>,
blob_index: u64,
@@ -244,6 +244,7 @@
) -> Self {
let exit_signal = Arc::new(AtomicBool::new(false));
let blocktree = blocktree.clone();
+ let bank = bank.clone();
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
@@ -322,7 +323,7 @@ mod test {
// Start up the broadcast stage
let broadcast_service = BroadcastService::new(
slot_height,
- bank.clone(),
+ &bank,
leader_info.sockets.broadcast,
cluster_info,
blob_index,

View File

@@ -283,7 +283,7 @@ impl Fullnode {
// TODO: This is not the correct bank. Instead TVU should pass along the
// frozen Bank for each completed block for RPC to use from it's notion of the "best"
// available fork (until we want to surface multiple forks to RPC)
- rpc_service.set_bank(self.bank_forks.read().unwrap().working_bank());
+ rpc_service.set_bank(&self.bank_forks.read().unwrap().working_bank());
}
if rotation_info.leader_id == self.id {
@@ -307,7 +307,7 @@
}
};
self.node_services.tpu.switch_to_leader(
- self.bank_forks.read().unwrap().working_bank(),
+ &self.bank_forks.read().unwrap().working_bank(),
&self.poh_recorder,
self.tpu_sockets
.iter()

View File

@@ -105,7 +105,8 @@ impl LeaderConfirmationService {
}
/// Create a new LeaderConfirmationService for computing confirmation.
- pub fn new(bank: Arc<Bank>, leader_id: Pubkey, exit: Arc<AtomicBool>) -> Self {
+ pub fn new(bank: &Arc<Bank>, leader_id: Pubkey, exit: Arc<AtomicBool>) -> Self {
+ let bank = bank.clone();
let thread_hdl = Builder::new()
.name("solana-leader-confirmation-service".to_string())
.spawn(move || {

View File

@@ -35,8 +35,8 @@ impl JsonRpcRequestProcessor {
})
}
- pub fn set_bank(&mut self, bank: Arc<Bank>) {
- self.bank = Some(bank);
+ pub fn set_bank(&mut self, bank: &Arc<Bank>) {
+ self.bank = Some(bank.clone());
}
pub fn new(storage_state: StorageState) -> Self {
@@ -377,7 +377,7 @@ mod tests {
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
StorageState::default(),
)));
- request_processor.write().unwrap().set_bank(bank);
+ request_processor.write().unwrap().set_bank(&bank);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(NodeInfo::default())));
let leader = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
@@ -404,7 +404,7 @@
let bob_pubkey = Keypair::new().pubkey();
let bank = Arc::new(Bank::new(&genesis_block));
let mut request_processor = JsonRpcRequestProcessor::new(StorageState::default());
- request_processor.set_bank(bank.clone());
+ request_processor.set_bank(&bank);
thread::spawn(move || {
let last_id = bank.last_id();
let tx = SystemTransaction::new_move(&alice, bob_pubkey, 20, last_id, 0);
@@ -573,7 +573,7 @@ mod tests {
let meta = Meta {
request_processor: {
let mut request_processor = JsonRpcRequestProcessor::new(StorageState::default());
- request_processor.set_bank(bank);
+ request_processor.set_bank(&bank);
Arc::new(RwLock::new(request_processor))
},
cluster_info: Arc::new(RwLock::new(ClusterInfo::new(NodeInfo::default()))),

View File

@@ -71,7 +71,7 @@ impl JsonRpcService {
}
}
- pub fn set_bank(&mut self, bank: Arc<Bank>) {
+ pub fn set_bank(&mut self, bank: &Arc<Bank>) {
self.request_processor.write().unwrap().set_bank(bank);
}
@@ -117,7 +117,7 @@ mod tests {
);
let mut rpc_service =
JsonRpcService::new(&cluster_info, rpc_addr, drone_addr, StorageState::default());
- rpc_service.set_bank(Arc::new(bank));
+ rpc_service.set_bank(&Arc::new(bank));
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");

View File

@@ -189,7 +189,7 @@ impl Tpu {
#[allow(clippy::too_many_arguments)]
pub fn switch_to_leader(
&mut self,
- bank: Arc<Bank>,
+ bank: &Arc<Bank>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
transactions_sockets: Vec<UdpSocket>,
broadcast_socket: UdpSocket,