2018-12-07 19:16:27 -08:00
|
|
|
use crate::blob_fetch_stage::BlobFetchStage;
|
2018-12-08 21:52:29 -08:00
|
|
|
#[cfg(feature = "chacha")]
|
|
|
|
use crate::chacha::{chacha_cbc_encrypt_file, CHACHA_BLOCK_SIZE};
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::client::mk_client;
|
|
|
|
use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
|
|
|
|
use crate::db_ledger::DbLedger;
|
|
|
|
use crate::gossip_service::GossipService;
|
|
|
|
use crate::leader_scheduler::LeaderScheduler;
|
|
|
|
use crate::ledger::LEDGER_DATA_FILE;
|
|
|
|
use crate::result::Result;
|
|
|
|
use crate::rpc_request::{RpcClient, RpcRequest};
|
|
|
|
use crate::service::Service;
|
2018-12-10 11:38:29 -08:00
|
|
|
use crate::storage_stage::ENTRIES_PER_SEGMENT;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::store_ledger_stage::StoreLedgerStage;
|
|
|
|
use crate::streamer::BlobReceiver;
|
2018-12-10 11:21:28 -08:00
|
|
|
use crate::thin_client::retry_get_balance;
|
2018-12-07 19:16:27 -08:00
|
|
|
use crate::window_service::window_service;
|
2018-11-02 08:40:29 -07:00
|
|
|
use rand::thread_rng;
|
|
|
|
use rand::Rng;
|
|
|
|
use solana_drone::drone::{request_airdrop_transaction, DRONE_PORT};
|
2018-11-16 08:04:46 -08:00
|
|
|
use solana_sdk::hash::{Hash, Hasher};
|
2018-11-02 08:40:29 -07:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
|
|
|
use solana_sdk::storage_program::StorageTransaction;
|
|
|
|
use solana_sdk::transaction::Transaction;
|
2018-10-02 11:47:51 -07:00
|
|
|
use std::fs::File;
|
|
|
|
use std::io;
|
|
|
|
use std::io::BufReader;
|
|
|
|
use std::io::Read;
|
|
|
|
use std::io::Seek;
|
|
|
|
use std::io::SeekFrom;
|
|
|
|
use std::io::{Error, ErrorKind};
|
|
|
|
use std::mem::size_of;
|
2018-09-21 15:32:15 -07:00
|
|
|
use std::net::UdpSocket;
|
2018-10-02 11:47:51 -07:00
|
|
|
use std::path::Path;
|
2018-11-02 08:40:29 -07:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2018-09-21 15:32:15 -07:00
|
|
|
use std::sync::mpsc::channel;
|
|
|
|
use std::sync::{Arc, RwLock};
|
2018-11-02 08:40:29 -07:00
|
|
|
use std::thread::sleep;
|
2018-09-21 15:32:15 -07:00
|
|
|
use std::thread::JoinHandle;
|
2018-09-24 11:50:37 -07:00
|
|
|
use std::time::Duration;
|
2018-09-21 15:32:15 -07:00
|
|
|
|
|
|
|
/// A replicator node: fetches a segment of the cluster's ledger, stores it
/// locally, encrypts and samples it, and submits a storage mining proof.
/// All long-running services started by `Replicator::new` are owned here so
/// they can be shut down and joined together.
pub struct Replicator {
    // Gossip service used to discover the leader and RPC peers.
    gossip_service: GossipService,
    // Pulls blobs off the TVU and repair sockets.
    fetch_stage: BlobFetchStage,
    // Persists entries received from the window service to the local ledger.
    store_ledger_stage: StoreLedgerStage,
    // Handle for the window_service thread.
    t_window: JoinHandle<()>,
    // Receiver side of the window service's retransmit channel; drained in
    // join() so the sender is not left with a closed channel.
    pub retransmit_receiver: BlobReceiver,
    // Shared shutdown flag observed by all of the services above.
    exit: Arc<AtomicBool>,
}
|
|
|
|
|
2018-10-02 11:47:51 -07:00
|
|
|
pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
|
|
|
|
let in_file = File::open(in_path)?;
|
|
|
|
let metadata = in_file.metadata()?;
|
|
|
|
let mut buffer_file = BufReader::new(in_file);
|
|
|
|
|
|
|
|
let mut hasher = Hasher::default();
|
|
|
|
let sample_size = size_of::<Hash>();
|
|
|
|
let sample_size64 = sample_size as u64;
|
|
|
|
let mut buf = vec![0; sample_size];
|
|
|
|
|
|
|
|
let file_len = metadata.len();
|
2018-10-08 13:12:33 -07:00
|
|
|
if file_len < sample_size64 {
|
|
|
|
return Err(Error::new(ErrorKind::Other, "file too short!"));
|
|
|
|
}
|
2018-10-02 11:47:51 -07:00
|
|
|
for offset in sample_offsets {
|
|
|
|
if *offset > (file_len - sample_size64) / sample_size64 {
|
|
|
|
return Err(Error::new(ErrorKind::Other, "offset too large"));
|
|
|
|
}
|
|
|
|
buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
|
2018-10-02 16:10:56 -07:00
|
|
|
trace!("sampling @ {} ", *offset);
|
2018-10-02 11:47:51 -07:00
|
|
|
match buffer_file.read(&mut buf) {
|
|
|
|
Ok(size) => {
|
|
|
|
assert_eq!(size, buf.len());
|
|
|
|
hasher.hash(&buf);
|
|
|
|
}
|
|
|
|
Err(e) => {
|
|
|
|
warn!("Error sampling file");
|
|
|
|
return Err(e);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(hasher.result())
|
|
|
|
}
|
|
|
|
|
2018-09-21 15:32:15 -07:00
|
|
|
impl Replicator {
|
2018-12-07 19:01:28 -08:00
|
|
|
#[allow(clippy::new_ret_no_self)]
|
2018-09-21 15:32:15 -07:00
|
|
|
pub fn new(
|
|
|
|
ledger_path: Option<&str>,
|
|
|
|
node: Node,
|
2018-11-02 08:40:29 -07:00
|
|
|
leader_info: &NodeInfo,
|
|
|
|
keypair: &Keypair,
|
|
|
|
) -> Result<Self> {
|
|
|
|
let exit = Arc::new(AtomicBool::new(false));
|
|
|
|
let done = Arc::new(AtomicBool::new(false));
|
|
|
|
|
|
|
|
info!("Replicator: id: {}", keypair.pubkey());
|
|
|
|
info!("Creating cluster info....");
|
2018-12-10 11:38:29 -08:00
|
|
|
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(node.info.clone())));
|
2018-09-21 15:32:15 -07:00
|
|
|
|
2018-11-02 08:40:29 -07:00
|
|
|
let leader_pubkey = leader_info.id;
|
|
|
|
{
|
|
|
|
let mut cluster_info_w = cluster_info.write().unwrap();
|
|
|
|
cluster_info_w.insert_info(leader_info.clone());
|
|
|
|
cluster_info_w.set_leader(leader_info.id);
|
2018-09-21 15:32:15 -07:00
|
|
|
}
|
|
|
|
|
2018-09-21 16:50:58 -07:00
|
|
|
let (entry_window_sender, entry_window_receiver) = channel();
|
2018-11-02 08:40:29 -07:00
|
|
|
let store_ledger_stage = StoreLedgerStage::new(entry_window_receiver, ledger_path);
|
2018-11-24 19:32:33 -08:00
|
|
|
|
|
|
|
// Create the RocksDb ledger, eventually will simply repurpose the input
|
|
|
|
// ledger path as the RocksDb ledger path once we replace the ledger with
|
|
|
|
// RocksDb. Note for now, this ledger will not contain any of the existing entries
|
|
|
|
// in the ledger located at ledger_path, and will only append on newly received
|
|
|
|
// entries after being passed to window_service
|
|
|
|
let db_ledger = Arc::new(RwLock::new(
|
|
|
|
DbLedger::open(&ledger_path.unwrap())
|
|
|
|
.expect("Expected to be able to open database ledger"),
|
|
|
|
));
|
|
|
|
|
2018-11-02 08:40:29 -07:00
|
|
|
let gossip_service = GossipService::new(
|
|
|
|
&cluster_info,
|
2018-12-10 01:24:41 -08:00
|
|
|
Some(db_ledger.clone()),
|
2018-11-02 08:40:29 -07:00
|
|
|
node.sockets.gossip,
|
|
|
|
exit.clone(),
|
|
|
|
);
|
|
|
|
|
|
|
|
info!("polling for leader");
|
|
|
|
let leader;
|
|
|
|
loop {
|
|
|
|
if let Some(l) = cluster_info.read().unwrap().get_gossip_top_leader() {
|
|
|
|
leader = l.clone();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
sleep(Duration::from_millis(900));
|
|
|
|
info!("{}", cluster_info.read().unwrap().node_info_trace());
|
|
|
|
}
|
|
|
|
|
|
|
|
info!("Got leader: {:?}", leader);
|
|
|
|
|
2018-12-10 11:38:29 -08:00
|
|
|
let mut storage_last_id;
|
|
|
|
let mut storage_entry_height;
|
|
|
|
loop {
|
|
|
|
let rpc_client = {
|
|
|
|
let cluster_info = cluster_info.read().unwrap();
|
|
|
|
let rpc_peers = cluster_info.rpc_peers();
|
|
|
|
info!("rpc peers: {:?}", rpc_peers);
|
|
|
|
let node_idx = thread_rng().gen_range(0, rpc_peers.len());
|
|
|
|
RpcClient::new_from_socket(rpc_peers[node_idx].rpc)
|
|
|
|
};
|
|
|
|
|
|
|
|
storage_last_id = RpcRequest::GetStorageMiningLastId
|
|
|
|
.make_rpc_request(&rpc_client, 2, None)
|
|
|
|
.expect("rpc request")
|
|
|
|
.to_string();
|
|
|
|
storage_entry_height = RpcRequest::GetStorageMiningEntryHeight
|
|
|
|
.make_rpc_request(&rpc_client, 2, None)
|
|
|
|
.expect("rpc request")
|
|
|
|
.as_u64()
|
|
|
|
.unwrap();
|
|
|
|
if storage_entry_height != 0 {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let signature = keypair.sign(storage_last_id.as_ref());
|
|
|
|
let signature = signature.as_ref();
|
|
|
|
let block_index = u64::from(signature[0])
|
|
|
|
| (u64::from(signature[1]) << 8)
|
|
|
|
| (u64::from(signature[1]) << 16)
|
|
|
|
| (u64::from(signature[2]) << 24);
|
|
|
|
let mut entry_height = block_index * ENTRIES_PER_SEGMENT;
|
|
|
|
entry_height %= storage_entry_height;
|
|
|
|
let max_entry_height = entry_height + ENTRIES_PER_SEGMENT;
|
2018-11-02 08:40:29 -07:00
|
|
|
|
|
|
|
let repair_socket = Arc::new(node.sockets.repair);
|
|
|
|
let mut blob_sockets: Vec<Arc<UdpSocket>> =
|
2018-12-07 14:09:29 -08:00
|
|
|
node.sockets.tvu.into_iter().map(Arc::new).collect();
|
2018-11-02 08:40:29 -07:00
|
|
|
blob_sockets.push(repair_socket.clone());
|
|
|
|
let (fetch_stage, blob_fetch_receiver) =
|
|
|
|
BlobFetchStage::new_multi_socket(blob_sockets, exit.clone());
|
|
|
|
|
|
|
|
// todo: pull blobs off the retransmit_receiver and recycle them?
|
|
|
|
let (retransmit_sender, retransmit_receiver) = channel();
|
|
|
|
|
2018-09-21 15:32:15 -07:00
|
|
|
let t_window = window_service(
|
2018-11-24 19:32:33 -08:00
|
|
|
db_ledger,
|
2018-10-08 19:55:54 -07:00
|
|
|
cluster_info.clone(),
|
2018-10-18 22:57:48 -07:00
|
|
|
0,
|
2018-09-21 15:32:15 -07:00
|
|
|
entry_height,
|
2018-09-24 14:10:51 -07:00
|
|
|
max_entry_height,
|
2018-09-21 15:32:15 -07:00
|
|
|
blob_fetch_receiver,
|
2018-09-21 16:50:58 -07:00
|
|
|
entry_window_sender,
|
2018-09-21 15:32:15 -07:00
|
|
|
retransmit_sender,
|
|
|
|
repair_socket,
|
2018-10-10 16:49:41 -07:00
|
|
|
Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
|
|
|
|
leader_pubkey,
|
|
|
|
))),
|
2018-11-02 08:40:29 -07:00
|
|
|
done.clone(),
|
2018-09-21 15:32:15 -07:00
|
|
|
);
|
|
|
|
|
2018-11-02 08:40:29 -07:00
|
|
|
info!("window created, waiting for ledger download done");
|
|
|
|
while !done.load(Ordering::Relaxed) {
|
|
|
|
sleep(Duration::from_millis(100));
|
|
|
|
}
|
2018-09-21 15:32:15 -07:00
|
|
|
|
2018-12-10 11:38:29 -08:00
|
|
|
let mut node_info = node.info.clone();
|
|
|
|
node_info.tvu = "0.0.0.0:0".parse().unwrap();
|
|
|
|
{
|
|
|
|
let mut cluster_info_w = cluster_info.write().unwrap();
|
|
|
|
cluster_info_w.insert_info(node_info);
|
|
|
|
}
|
|
|
|
|
2018-11-02 08:40:29 -07:00
|
|
|
let mut client = mk_client(&leader);
|
|
|
|
|
2018-12-10 11:21:28 -08:00
|
|
|
if retry_get_balance(&mut client, &keypair.pubkey(), None).is_none() {
|
2018-11-02 08:40:29 -07:00
|
|
|
let mut drone_addr = leader_info.tpu;
|
|
|
|
drone_addr.set_port(DRONE_PORT);
|
|
|
|
|
|
|
|
let airdrop_amount = 1;
|
|
|
|
|
|
|
|
let last_id = client.get_last_id();
|
|
|
|
match request_airdrop_transaction(
|
|
|
|
&drone_addr,
|
|
|
|
&keypair.pubkey(),
|
|
|
|
airdrop_amount,
|
|
|
|
last_id,
|
|
|
|
) {
|
|
|
|
Ok(transaction) => {
|
|
|
|
let signature = client.transfer_signed(&transaction).unwrap();
|
|
|
|
client.poll_for_signature(&signature).unwrap();
|
|
|
|
}
|
|
|
|
Err(err) => {
|
|
|
|
panic!(
|
|
|
|
"Error requesting airdrop: {:?} to addr: {:?} amount: {}",
|
|
|
|
err, drone_addr, airdrop_amount
|
|
|
|
);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
info!("Done downloading ledger at {}", ledger_path.unwrap());
|
|
|
|
|
|
|
|
let ledger_path = Path::new(ledger_path.unwrap());
|
|
|
|
let ledger_data_file_encrypted = ledger_path.join(format!("{}.enc", LEDGER_DATA_FILE));
|
|
|
|
#[cfg(feature = "chacha")]
|
|
|
|
{
|
|
|
|
let ledger_data_file = ledger_path.join(LEDGER_DATA_FILE);
|
|
|
|
let mut ivec = [0u8; CHACHA_BLOCK_SIZE];
|
|
|
|
ivec[0..4].copy_from_slice(&[2, 3, 4, 5]);
|
|
|
|
|
|
|
|
chacha_cbc_encrypt_file(&ledger_data_file, &ledger_data_file_encrypted, &mut ivec)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
info!("Done encrypting the ledger");
|
|
|
|
|
|
|
|
let sampling_offsets = [0, 1, 2, 3];
|
|
|
|
|
|
|
|
match sample_file(&ledger_data_file_encrypted, &sampling_offsets) {
|
|
|
|
Ok(hash) => {
|
|
|
|
let last_id = client.get_last_id();
|
|
|
|
info!("sampled hash: {}", hash);
|
2018-12-10 11:38:29 -08:00
|
|
|
let tx =
|
|
|
|
Transaction::storage_new_mining_proof(&keypair, hash, last_id, entry_height);
|
2018-11-02 08:40:29 -07:00
|
|
|
client.transfer_signed(&tx).expect("transfer didn't work!");
|
|
|
|
}
|
|
|
|
Err(e) => info!("Error occurred while sampling: {:?}", e),
|
|
|
|
}
|
|
|
|
|
2018-12-07 19:01:28 -08:00
|
|
|
Ok(Self {
|
2018-11-02 08:40:29 -07:00
|
|
|
gossip_service,
|
|
|
|
fetch_stage,
|
|
|
|
store_ledger_stage,
|
|
|
|
t_window,
|
|
|
|
retransmit_receiver,
|
|
|
|
exit,
|
|
|
|
})
|
|
|
|
}
|
2018-09-21 15:32:15 -07:00
|
|
|
|
2018-11-02 08:40:29 -07:00
|
|
|
pub fn close(self) {
|
|
|
|
self.exit.store(true, Ordering::Relaxed);
|
|
|
|
self.join()
|
2018-09-21 15:32:15 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn join(self) {
|
2018-12-06 12:52:47 -08:00
|
|
|
self.gossip_service.join().unwrap();
|
2018-09-21 15:32:15 -07:00
|
|
|
self.fetch_stage.join().unwrap();
|
2018-09-21 18:56:20 -07:00
|
|
|
self.t_window.join().unwrap();
|
2018-09-21 15:32:15 -07:00
|
|
|
self.store_ledger_stage.join().unwrap();
|
2018-09-24 11:50:37 -07:00
|
|
|
|
|
|
|
// Drain the queue here to prevent self.retransmit_receiver from being dropped
|
|
|
|
// before the window_service thread is joined
|
|
|
|
let mut retransmit_queue_count = 0;
|
|
|
|
while let Ok(_blob) = self.retransmit_receiver.recv_timeout(Duration::new(1, 0)) {
|
|
|
|
retransmit_queue_count += 1;
|
|
|
|
}
|
|
|
|
debug!("retransmit channel count: {}", retransmit_queue_count);
|
2018-09-21 15:32:15 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use crate::replicator::sample_file;
    use solana_sdk::hash::Hash;
    use solana_sdk::signature::{Keypair, KeypairUtil};
    use std::fs::File;
    use std::fs::{create_dir_all, remove_file};
    use std::io::Write;
    use std::mem::size_of;
    use std::path::PathBuf;

    /// Builds a unique scratch-file path under $OUT_DIR/tmp (or target/tmp).
    /// A freshly generated pubkey is appended so concurrently running tests
    /// never collide on the same file.
    fn tmp_file_path(name: &str) -> PathBuf {
        use std::env;
        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
        let keypair = Keypair::new();

        let mut path = PathBuf::new();
        path.push(out_dir);
        path.push("tmp");
        create_dir_all(&path).unwrap();

        path.push(format!("{}-{}", name, keypair.pubkey()));
        path
    }

    #[test]
    fn test_sample_file() {
        solana_logger::setup();
        let in_path = tmp_file_path("test_sample_file_input.txt");
        let num_strings = 4096;
        let string = "12foobar";
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..num_strings {
                // write_all: a bare write() may report fewer bytes written
                // than requested without an error, silently truncating the
                // fixture and breaking the reference hash below.
                in_file.write_all(string.as_bytes()).unwrap();
            }
        }
        let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
        let samples: Vec<_> = (0..num_samples).collect();
        let res = sample_file(&in_path, samples.as_slice());
        assert!(res.is_ok());
        let ref_hash: Hash = Hash::new(&[
            173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
            151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
        ]);
        let res = res.unwrap();
        assert_eq!(res, ref_hash);

        // Sample just past the end
        assert!(sample_file(&in_path, &[num_samples]).is_err());
        remove_file(&in_path).unwrap();
    }

    #[test]
    fn test_sample_file_invalid_offset() {
        let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..4096 {
                in_file.write_all("123456foobar".as_bytes()).unwrap();
            }
        }
        let samples = [0, 200000];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
        remove_file(in_path).unwrap();
    }

    #[test]
    fn test_sample_file_missing_file() {
        let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
        let samples = [0, 5];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
    }
}
|