dynamic network test
* cleaned up fullnode api
* added debug_id to ReplicatedData and crdt for debugging
parent 033f6dcbcb
commit 3a90f138b2
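The debug_id named in the second bullet is a short per-node tag for log lines: the first 8 bytes of a node's public key read as a little-endian u64 and printed with {:x}. A minimal standalone sketch of that scheme, mirroring the make_debug_id() helper this diff adds to src/crdt.rs (the byte array below stands in for the real PublicKey type):

extern crate byteorder;

use byteorder::{LittleEndian, ReadBytesExt};
use std::io::Cursor;

// Read the first 8 bytes of a 32-byte public key as a little-endian u64,
// the same derivation as the make_debug_id() helper added to src/crdt.rs.
fn make_debug_id(buf: &[u8]) -> u64 {
    let mut rdr = Cursor::new(&buf[..8]);
    rdr.read_u64::<LittleEndian>()
        .expect("rdr.read_u64 in fn debug_id")
}

fn main() {
    let pubkey = [0xABu8; 32]; // stand-in for a node's PublicKey
    // Prints "node abababababababab", the hex tag used in the new log lines.
    println!("node {:x}", make_debug_id(&pubkey));
}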
@@ -15,4 +15,3 @@ - rustup component add rustfmt-preview
 - cargo fmt -- --write-mode=check
 - cargo build --verbose
 - cargo test --verbose
-- cargo test -- --ignored
@@ -8,7 +8,7 @@ extern crate solana;
 use atty::{is, Stream};
 use getopts::Options;
 use solana::crdt::{ReplicatedData, TestNode};
-use solana::fullnode::FullNode;
+use solana::fullnode::{FullNode, LedgerFile};
 use std::env;
 use std::fs::File;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};

@@ -83,12 +83,23 @@ fn main() -> () {
     let fullnode = if matches.opt_present("t") {
         let testnet_address_string = matches.opt_str("t").unwrap();
         let testnet_addr = testnet_address_string.parse().unwrap();
-        FullNode::new(node, false, None, Some(testnet_addr), None, exit)
+        FullNode::new(
+            node,
+            false,
+            LedgerFile::StdIn,
+            Some(testnet_addr),
+            LedgerFile::NoFile,
+            exit,
+        )
     } else {
         node.data.current_leader_id = node.data.id.clone();
-        let outfile = matches.opt_str("o");
-        FullNode::new(node, true, None, None, outfile, exit)
+        let outfile = if let Some(f) = matches.opt_str("o") {
+            LedgerFile::Path(f)
+        } else {
+            LedgerFile::StdIn
+        };
+        FullNode::new(node, true, LedgerFile::StdIn, None, outfile, exit)
     };
     for t in fullnode.thread_hdls {
         t.join().expect("join");
src/crdt.rs | 89
@@ -109,6 +109,12 @@ pub struct ReplicatedData {
     last_verified_count: u64,
 }
 
+fn make_debug_id(buf: &[u8]) -> u64 {
+    let mut rdr = Cursor::new(&buf[..8]);
+    rdr.read_u64::<LittleEndian>()
+        .expect("rdr.read_u64 in fn debug_id")
+}
+
 impl ReplicatedData {
     pub fn new(
         id: PublicKey,

@@ -132,7 +138,9 @@ impl ReplicatedData {
             last_verified_count: 0,
         }
     }
+    pub fn debug_id(&self) -> u64 {
+        make_debug_id(&self.id)
+    }
     fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
         let mut nxt_addr = addr.clone();
         nxt_addr.set_port(addr.port() + nxt);

@@ -224,6 +232,9 @@ impl Crdt {
         g.table.insert(me.id, me);
         g
     }
+    pub fn debug_id(&self) -> u64 {
+        make_debug_id(&self.me)
+    }
     pub fn my_data(&self) -> &ReplicatedData {
         &self.table[&self.me]
     }

@@ -233,9 +244,14 @@ impl Crdt {
 
     pub fn set_leader(&mut self, key: PublicKey) -> () {
         let mut me = self.my_data().clone();
+        info!(
+            "{:x}: setting leader to {:x} from {:x}",
+            me.debug_id(),
+            make_debug_id(&key),
+            make_debug_id(&me.current_leader_id),
+        );
         me.current_leader_id = key;
         me.version += 1;
-        info!("setting leader to {:?}", &key[..4]);
         self.insert(&me);
     }
 

@@ -249,9 +265,9 @@ impl Crdt {
             //somehow we signed a message for our own identity with a higher version that
             // we have stored ourselves
             trace!(
-                "me: {:?} v.id: {:?} version: {}",
-                &self.me[..4],
-                &v.id[..4],
+                "me: {:x} v.id: {:x} version: {}",
+                self.debug_id(),
+                v.debug_id(),
                 v.version
             );
             self.update_index += 1;

@@ -259,9 +275,9 @@ impl Crdt {
             let _ = self.local.insert(v.id, self.update_index);
         } else {
             trace!(
-                "INSERT FAILED me: {:?} data: {:?} new.version: {} me.version: {}",
-                &self.me[..4],
-                &v.id[..4],
+                "INSERT FAILED me: {:x} data: {:?} new.version: {} me.version: {}",
+                self.debug_id(),
+                v.debug_id(),
                 v.version,
                 self.table[&v.id].version
             );

@@ -289,10 +305,15 @@ impl Crdt {
             .iter()
             .filter_map(|(&k, v)| {
                 if k != self.me && (now - v) > limit {
-                    info!("purge {:?} {}", &k[..4], now - v);
+                    info!("purge {:x} {}", make_debug_id(&k), now - v);
                     Some(k)
                 } else {
-                    trace!("purge skipped {:?} {} {}", &k[..4], now - v, limit);
+                    trace!(
+                        "purge skipped {:x} {} {}",
+                        make_debug_id(&k),
+                        now - v,
+                        limit
+                    );
                     None
                 }
             })

@@ -317,7 +338,11 @@ impl Crdt {
     ) -> Result<()> {
         let me: ReplicatedData = {
             let robj = obj.read().expect("'obj' read lock in crdt::index_blobs");
-            debug!("broadcast table {}", robj.table.len());
+            debug!(
+                "{:x}: broadcast table {}",
+                robj.debug_id(),
+                robj.table.len()
+            );
             robj.table[&robj.me].clone()
         };
 

@@ -454,10 +479,11 @@ impl Crdt {
         let errs: Vec<_> = orders
             .par_iter()
             .map(|v| {
-                trace!(
-                    "retransmit blob {} to {}",
+                debug!(
+                    "{:x}: retransmit blob {} to {:x}",
+                    me.debug_id(),
                     rblob.get_index().unwrap(),
-                    v.replicate_addr
+                    v.debug_id(),
                 );
                 //TODO profile this, may need multiple sockets for par_iter
                 assert!(rblob.meta.size < BLOB_SIZE);

@@ -556,9 +582,9 @@ impl Crdt {
         let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
         let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
         trace!(
-            "created gossip request from {:?} to {:?} {}",
-            &self.me[..4],
-            &v.id[..4],
+            "created gossip request from {:x} to {:x} {}",
+            self.debug_id(),
+            v.debug_id(),
             v.gossip_addr
         );
 

@@ -596,11 +622,17 @@ impl Crdt {
         for v in cur {
             let cnt = table.entry(&v.current_leader_id).or_insert(0);
             *cnt += 1;
-            trace!("leader {:?} {}", &v.current_leader_id[..4], *cnt);
+            trace!("leader {:x} {}", make_debug_id(&v.current_leader_id), *cnt);
         }
         let mut sorted: Vec<(&PublicKey, usize)> = table.into_iter().collect();
-        if sorted.len() > 0 {
-            debug!("sorted leaders {:?}", sorted);
+        let my_id = self.debug_id();
+        for x in sorted.iter() {
+            trace!(
+                "{:x}: sorted leaders {:x} votes: {}",
+                my_id,
+                make_debug_id(&x.0),
+                x.1
+            );
         }
         sorted.sort_by_key(|a| a.1);
         sorted.last().map(|a| *a.0)

@@ -769,18 +801,18 @@ impl Crdt {
             if len < 1 {
                 let me = obj.read().unwrap();
                 trace!(
-                    "no updates me {:?} ix {} since {}",
-                    &me.me[..4],
+                    "no updates me {:x} ix {} since {}",
+                    me.debug_id(),
                     me.update_index,
                     v
                 );
                 None
             } else if let Ok(r) = to_blob(rsp, addr, &blob_recycler) {
                 trace!(
-                    "sending updates me {:?} len {} to {:?} {}",
-                    &obj.read().unwrap().me[..4],
+                    "sending updates me {:x} len {} to {:x} {}",
+                    obj.read().unwrap().debug_id(),
                     len,
-                    &from_rd.id[..4],
+                    from_rd.debug_id(),
                     addr,
                 );
                 Some(r)

@@ -790,7 +822,12 @@ impl Crdt {
                 }
             }
             Ok(Protocol::ReceiveUpdates(from, ups, data, external_liveness)) => {
-                trace!("ReceivedUpdates {:?} {} {}", &from[0..4], ups, data.len());
+                trace!(
+                    "ReceivedUpdates {:x} {} {}",
+                    make_debug_id(&from),
+                    ups,
+                    data.len()
+                );
                 obj.write()
                     .expect("'obj' write lock in ReceiveUpdates")
                     .apply_updates(from, ups, &data, &external_liveness);
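The leader-selection hunk above keeps the same plurality vote over each peer's current_leader_id and only switches the logging to hex debug ids. A toy sketch of that tally shape, assuming a vote is just an 8-byte id (the free function below is illustrative, not the crate's API):

use std::collections::HashMap;

// Plurality vote: count how many peers name each candidate as leader,
// sort ascending by count, and take the last (most-voted) key --
// the same shape as the table.entry(..).or_insert(0) loop above.
fn top_leader(votes: &[[u8; 8]]) -> Option<[u8; 8]> {
    let mut table = HashMap::new();
    for v in votes {
        let cnt = table.entry(v).or_insert(0usize);
        *cnt += 1;
    }
    let mut sorted: Vec<_> = table.into_iter().collect();
    sorted.sort_by_key(|a| a.1);
    sorted.last().map(|a| *a.0)
}

fn main() {
    let votes = [[1u8; 8], [2u8; 8], [2u8; 8]];
    assert_eq!(top_leader(&votes), Some([2u8; 8]));
}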
src/drone.rs | 16
@@ -245,17 +245,13 @@ mod tests {
         let bob_pubkey = KeyPair::new().pubkey();
         let carlos_pubkey = KeyPair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
+        let leader_data = leader.data.clone();
 
         let server = FullNode::new_leader(
             bank,
             0,
             Some(Duration::from_millis(30)),
-            leader.data.clone(),
-            leader.sockets.requests,
-            leader.sockets.transaction,
-            leader.sockets.broadcast,
-            leader.sockets.respond,
-            leader.sockets.gossip,
+            leader,
             exit.clone(),
             sink(),
         );

@@ -266,8 +262,8 @@ mod tests {
         let mut drone = Drone::new(
             alice.keypair(),
             addr,
-            leader.data.transactions_addr,
-            leader.data.requests_addr,
+            leader_data.transactions_addr,
+            leader_data.requests_addr,
             None,
             Some(5_000_050),
         );

@@ -291,9 +287,9 @@ mod tests {
             UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
 
         let mut client = ThinClient::new(
-            leader.data.requests_addr,
+            leader_data.requests_addr,
             requests_socket,
-            leader.data.transactions_addr,
+            leader_data.transactions_addr,
             transactions_socket,
         );
src/fullnode.rs | 193
@@ -8,9 +8,8 @@ use packet::BlobRecycler;
 use rpu::Rpu;
 use std::fs::File;
 use std::io::Write;
-use std::io::{stdin, stdout, BufReader};
+use std::io::{sink, stdin, stdout, BufReader};
 use std::net::SocketAddr;
-use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, RwLock};
 use std::thread::JoinHandle;

@@ -24,30 +23,42 @@ pub struct FullNode {
     pub thread_hdls: Vec<JoinHandle<()>>,
 }
 
+pub enum LedgerFile {
+    NoFile,
+    StdIn,
+    StdOut,
+    Sink,
+    Path(String),
+}
+
 impl FullNode {
     pub fn new(
         mut node: TestNode,
         leader: bool,
-        infile: Option<String>,
+        infile: LedgerFile,
         network_entry_for_validator: Option<SocketAddr>,
-        outfile_for_leader: Option<String>,
+        outfile_for_leader: LedgerFile,
         exit: Arc<AtomicBool>,
     ) -> FullNode {
         info!("creating bank...");
         let bank = Bank::default();
-        let entry_height = if let Some(path) = infile {
-            let f = File::open(path).unwrap();
-            let mut r = BufReader::new(f);
-            let entries =
-                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
-            info!("processing ledger...");
-            bank.process_ledger(entries).expect("process_ledger")
-        } else {
-            let mut r = BufReader::new(stdin());
-            let entries =
-                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
-            info!("processing ledger...");
-            bank.process_ledger(entries).expect("process_ledger")
+        let entry_height = match infile {
+            LedgerFile::Path(path) => {
+                let f = File::open(path).unwrap();
+                let mut r = BufReader::new(f);
+                let entries =
+                    entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
+                info!("processing ledger...");
+                bank.process_ledger(entries).expect("process_ledger")
+            }
+            LedgerFile::StdIn => {
+                let mut r = BufReader::new(stdin());
+                let entries =
+                    entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
+                info!("processing ledger...");
+                bank.process_ledger(entries).expect("process_ledger")
+            }
+            _ => panic!("expected LedgerFile::StdIn or LedgerFile::Path for infile"),
         };
 
         // entry_height is the network-wide agreed height of the ledger.

@@ -62,6 +73,7 @@ impl FullNode {
             "starting... local gossip address: {} (advertising {})",
             local_gossip_addr, node.data.gossip_addr
         );
+        let requests_addr = node.data.requests_addr.clone();
         if !leader {
             let testnet_addr = network_entry_for_validator.expect("validator requires entry");
 

@@ -69,56 +81,57 @@ impl FullNode {
             let server = FullNode::new_validator(
                 bank,
                 entry_height,
-                node.data.clone(),
-                node.sockets.requests,
-                node.sockets.respond,
-                node.sockets.replicate,
-                node.sockets.gossip,
-                node.sockets.repair,
+                node,
                 network_entry_point,
                 exit.clone(),
             );
             info!(
                 "validator ready... local request address: {} (advertising {}) connected to: {}",
-                local_requests_addr, node.data.requests_addr, testnet_addr
+                local_requests_addr, requests_addr, testnet_addr
             );
             server
         } else {
             node.data.current_leader_id = node.data.id.clone();
-            let server = if let Some(file) = outfile_for_leader {
-                FullNode::new_leader(
-                    bank,
-                    entry_height,
-                    //Some(Duration::from_millis(1000)),
-                    None,
-                    node.data.clone(),
-                    node.sockets.requests,
-                    node.sockets.transaction,
-                    node.sockets.broadcast,
-                    node.sockets.respond,
-                    node.sockets.gossip,
-                    exit.clone(),
-                    File::create(file).expect("opening ledger file"),
-                )
-            } else {
-                FullNode::new_leader(
-                    bank,
-                    entry_height,
-                    //Some(Duration::from_millis(1000)),
-                    None,
-                    node.data.clone(),
-                    node.sockets.requests,
-                    node.sockets.transaction,
-                    node.sockets.broadcast,
-                    node.sockets.respond,
-                    node.sockets.gossip,
-                    exit.clone(),
-                    stdout(),
-                )
-            };
+            let server =
+                match outfile_for_leader {
+                    LedgerFile::Path(file) => {
+                        FullNode::new_leader(
+                            bank,
+                            entry_height,
+                            //Some(Duration::from_millis(1000)),
+                            None,
+                            node,
+                            exit.clone(),
+                            File::create(file).expect("opening ledger file"),
+                        )
+                    },
+                    LedgerFile::StdOut => {
+                        FullNode::new_leader(
+                            bank,
+                            entry_height,
+                            //Some(Duration::from_millis(1000)),
+                            None,
+                            node,
+                            exit.clone(),
+                            stdout(),
+                        )
+                    },
+                    LedgerFile::Sink => {
+                        FullNode::new_leader(
+                            bank,
+                            entry_height,
+                            //Some(Duration::from_millis(1000)),
+                            None,
+                            node,
+                            exit.clone(),
+                            sink(),
+                        )
+                    },
+                    _ => panic!("expected LedgerFile::StdOut, LedgerFile::Path, or LedgerFile::Sink, for infile"),
+                };
             info!(
                 "leader ready... local request address: {} (advertising {})",
-                local_requests_addr, node.data.requests_addr
+                local_requests_addr, requests_addr
             );
             server
         }

@@ -151,45 +164,43 @@ impl FullNode {
         bank: Bank,
         entry_height: u64,
         tick_duration: Option<Duration>,
-        me: ReplicatedData,
-        requests_socket: UdpSocket,
-        transactions_socket: UdpSocket,
-        broadcast_socket: UdpSocket,
-        respond_socket: UdpSocket,
-        gossip_socket: UdpSocket,
+        node: TestNode,
         exit: Arc<AtomicBool>,
         writer: W,
     ) -> Self {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
-        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
+        let rpu = Rpu::new(
+            bank.clone(),
+            node.sockets.requests,
+            node.sockets.respond,
+            exit.clone(),
+        );
         thread_hdls.extend(rpu.thread_hdls);
 
         let blob_recycler = BlobRecycler::default();
         let (tpu, blob_receiver) = Tpu::new(
             bank.clone(),
             tick_duration,
-            transactions_socket,
+            node.sockets.transaction,
             blob_recycler.clone(),
             exit.clone(),
             writer,
         );
         thread_hdls.extend(tpu.thread_hdls);
-
-        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data)));
+
         let window = streamer::default_window();
-        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
         let ncp = Ncp::new(
             crdt.clone(),
             window.clone(),
-            gossip_socket,
-            gossip_send_socket,
+            node.sockets.gossip,
+            node.sockets.gossip_send,
             exit.clone(),
         ).expect("Ncp::new");
         thread_hdls.extend(ncp.thread_hdls);
 
         let t_broadcast = streamer::broadcaster(
-            broadcast_socket,
+            node.sockets.broadcast,
             exit.clone(),
             crdt,
             window,

@@ -234,32 +245,30 @@ impl FullNode {
     pub fn new_validator(
         bank: Bank,
         entry_height: u64,
-        me: ReplicatedData,
-        requests_socket: UdpSocket,
-        respond_socket: UdpSocket,
-        replicate_socket: UdpSocket,
-        gossip_listen_socket: UdpSocket,
-        repair_socket: UdpSocket,
+        node: TestNode,
         entry_point: ReplicatedData,
         exit: Arc<AtomicBool>,
     ) -> Self {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
-        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
+        let rpu = Rpu::new(
+            bank.clone(),
+            node.sockets.requests,
+            node.sockets.respond,
+            exit.clone(),
+        );
         thread_hdls.extend(rpu.thread_hdls);
 
-        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data)));
         crdt.write()
             .expect("'crdt' write lock before insert() in pub fn replicate")
             .insert(&entry_point);
         let window = streamer::default_window();
-        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
-        let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
         let ncp = Ncp::new(
             crdt.clone(),
             window.clone(),
-            gossip_listen_socket,
-            gossip_send_socket,
+            node.sockets.gossip,
+            node.sockets.gossip_send,
             exit.clone(),
         ).expect("Ncp::new");
 

@@ -268,9 +277,9 @@ impl FullNode {
             entry_height,
             crdt.clone(),
             window.clone(),
-            replicate_socket,
-            repair_socket,
-            retransmit_socket,
+            node.sockets.replicate,
+            node.sockets.repair,
+            node.sockets.retransmit,
             exit.clone(),
         );
         thread_hdls.extend(tvu.thread_hdls);

@@ -292,18 +301,8 @@ mod tests {
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let exit = Arc::new(AtomicBool::new(false));
-        let v = FullNode::new_validator(
-            bank,
-            0,
-            tn.data.clone(),
-            tn.sockets.requests,
-            tn.sockets.respond,
-            tn.sockets.replicate,
-            tn.sockets.gossip,
-            tn.sockets.repair,
-            tn.data,
-            exit.clone(),
-        );
+        let entry = tn.data.clone();
+        let v = FullNode::new_validator(bank, 0, tn, entry, exit.clone());
         exit.store(true, Ordering::Relaxed);
         for t in v.thread_hdls {
             t.join().unwrap();
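After this cleanup a caller hands FullNode::new a whole TestNode plus two LedgerFile choices instead of a ReplicatedData and a pile of individual sockets. A hypothetical leader wiring against the new signature (the ledger path is made up; compare the call sites in the tests below):

extern crate solana;

use solana::crdt::TestNode;
use solana::fullnode::{FullNode, LedgerFile};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    // Leader: replay the ledger from a file, write new entries to stdout.
    let leader = FullNode::new(
        TestNode::new(),    // owns all the UDP sockets the old API took one by one
        true,               // run as leader
        LedgerFile::Path("ledger.log".to_string()), // infile to replay
        None,               // leaders need no network entry point
        LedgerFile::StdOut, // outfile_for_leader
        exit.clone(),
    );
    for t in leader.thread_hdls {
        t.join().expect("join");
    }
}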
src/streamer.rs | 112
@@ -1,6 +1,6 @@
 //! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
 //!
-use crdt::Crdt;
+use crdt::{Crdt, ReplicatedData};
 #[cfg(feature = "erasure")]
 use erasure;
 use packet::{

@@ -92,7 +92,7 @@ pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)>
             break;
         }
     }
-    debug!("batch len {}", batch.len());
+    trace!("batch len {}", batch.len());
     Ok((batch, len))
 }
 

@@ -171,6 +171,7 @@ fn find_next_missing(
 }
 
 fn repair_window(
+    debug_id: u64,
     locked_window: &Window,
     crdt: &Arc<RwLock<Crdt>>,
     _recycler: &BlobRecycler,

@@ -199,14 +200,33 @@ fn repair_window(
     *times += 1;
     //if times flips from all 1s 7 -> 8, 15 -> 16, we retry otherwise return Ok
     if *times & (*times - 1) != 0 {
-        trace!("repair_window counter {} {}", *times, *consumed);
+        trace!(
+            "repair_window counter {} {} {}",
+            *times,
+            *consumed,
+            *received
+        );
         return Ok(());
     }
 
     let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
+    if reqs.len() > 0 {
+        debug!(
+            "{:x}: repair_window counter times: {} consumed: {} received: {} missing: {}",
+            debug_id,
+            *times,
+            *consumed,
+            *received,
+            reqs.len()
+        );
+    }
     let sock = UdpSocket::bind("0.0.0.0:0")?;
     for (to, req) in reqs {
         //todo cache socket
-        info!("repair_window request {} {} {}", *consumed, *received, to);
+        info!(
+            "{:x} repair_window request {} {} {}",
+            debug_id, *consumed, *received, to
+        );
         assert!(req.len() < BLOB_SIZE);
         sock.send_to(&req, to)?;
     }

@@ -214,6 +234,7 @@ fn repair_window(
 }
 
 fn recv_window(
+    debug_id: u64,
     locked_window: &Window,
     crdt: &Arc<RwLock<Crdt>>,
     recycler: &BlobRecycler,

@@ -225,45 +246,49 @@ fn recv_window(
 ) -> Result<()> {
     let timer = Duration::from_millis(200);
     let mut dq = r.recv_timeout(timer)?;
-    let leader_id = crdt.read()
+    let maybe_leader: Option<ReplicatedData> = crdt.read()
         .expect("'crdt' read lock in fn recv_window")
         .leader_data()
-        .expect("leader not ready")
-        .id;
+        .cloned();
     while let Ok(mut nq) = r.try_recv() {
         dq.append(&mut nq)
     }
     {
         //retransmit all leader blocks
         let mut retransmitq = VecDeque::new();
-        for b in &dq {
-            let p = b.read().expect("'b' read lock in fn recv_window");
-            //TODO this check isn't safe against adverserial packets
-            //we need to maintain a sequence window
-            trace!(
-                "idx: {} addr: {:?} id: {:?} leader: {:?}",
-                p.get_index().expect("get_index in fn recv_window"),
-                p.get_id().expect("get_id in trace! fn recv_window"),
-                p.meta.addr(),
-                leader_id
-            );
-            if p.get_id().expect("get_id in fn recv_window") == leader_id {
-                //TODO
-                //need to copy the retransmitted blob
-                //otherwise we get into races with which thread
-                //should do the recycling
-                //
-                //a better abstraction would be to recycle when the blob
-                //is dropped via a weakref to the recycler
-                let nv = recycler.allocate();
-                {
-                    let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
-                    let sz = p.meta.size;
-                    mnv.meta.size = sz;
-                    mnv.data[..sz].copy_from_slice(&p.data[..sz]);
-                }
-                retransmitq.push_back(nv);
-            }
-        }
+        if let Some(leader) = maybe_leader {
+            for b in &dq {
+                let p = b.read().expect("'b' read lock in fn recv_window");
+                //TODO this check isn't safe against adverserial packets
+                //we need to maintain a sequence window
+                let leader_id = leader.id;
+                trace!(
+                    "idx: {} addr: {:?} id: {:?} leader: {:?}",
+                    p.get_index().expect("get_index in fn recv_window"),
+                    p.get_id().expect("get_id in trace! fn recv_window"),
+                    p.meta.addr(),
+                    leader_id
+                );
+                if p.get_id().expect("get_id in fn recv_window") == leader_id {
+                    //TODO
+                    //need to copy the retransmitted blob
+                    //otherwise we get into races with which thread
+                    //should do the recycling
+                    //
+                    //a better abstraction would be to recycle when the blob
+                    //is dropped via a weakref to the recycler
+                    let nv = recycler.allocate();
+                    {
+                        let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
+                        let sz = p.meta.size;
+                        mnv.meta.size = sz;
+                        mnv.data[..sz].copy_from_slice(&p.data[..sz]);
+                    }
+                    retransmitq.push_back(nv);
+                }
+            }
+        } else {
+            warn!("{:x}: no leader to retransmit from", debug_id);
+        }
         if !retransmitq.is_empty() {
             retransmit.send(retransmitq)?;

@@ -283,8 +308,8 @@ fn recv_window(
             // probably from a repair window request
             if pix < *consumed {
                 debug!(
-                    "received: {} but older than consumed: {} skipping..",
-                    pix, *consumed
+                    "{:x}: received: {} but older than consumed: {} skipping..",
+                    debug_id, pix, *consumed
                 );
                 continue;
             }

@@ -316,9 +341,9 @@ fn recv_window(
             window[w] = Some(b);
         } else if let Some(cblob) = &window[w] {
             if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
-                warn!("overrun blob at index {:}", w);
+                warn!("{:x}: overrun blob at index {:}", debug_id, w);
             } else {
-                debug!("duplicate blob at index {:}", w);
+                debug!("{:x}: duplicate blob at index {:}", debug_id, w);
             }
         }
         loop {

@@ -404,7 +429,7 @@ fn print_window(locked_window: &Window, consumed: u64) {
             }
         })
         .collect();
-        debug!("WINDOW ({}): {}", consumed, buf.join(""));
+        trace!("WINDOW ({}): {}", consumed, buf.join(""));
     }
 }
 

@@ -429,11 +454,13 @@ pub fn window(
             let mut received = entry_height;
             let mut last = entry_height;
             let mut times = 0;
+            let debug_id = crdt.read().unwrap().debug_id();
             loop {
                 if exit.load(Ordering::Relaxed) {
                     break;
                 }
                 let _ = recv_window(
+                    debug_id,
                     &window,
                     &crdt,
                     &recycler,

@@ -444,6 +471,7 @@ pub fn window(
                     &retransmit,
                 );
                 let _ = repair_window(
+                    debug_id,
                     &window,
                     &crdt,
                     &recycler,

@@ -452,12 +480,14 @@ pub fn window(
                     &mut consumed,
                     &mut received,
                 );
+                assert!(consumed <= (received + 1));
             }
         })
         .unwrap()
 }
 
 fn broadcast(
+    debug_id: u64,
     crdt: &Arc<RwLock<Crdt>>,
     window: &Window,
     recycler: &BlobRecycler,

@@ -487,7 +517,7 @@ fn broadcast(
         erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
 
     let blobs_len = blobs.len();
-    debug!("broadcast blobs.len: {}", blobs_len);
+    debug!("{:x} broadcast blobs.len: {}", debug_id, blobs_len);
 
     // Index the blobs
     Crdt::index_blobs(crdt, &blobs, receive_index)?;

@@ -558,11 +588,13 @@ pub fn broadcaster(
         .spawn(move || {
             let mut transmit_index = entry_height;
             let mut receive_index = entry_height;
+            let debug_id = crdt.read().unwrap().debug_id();
             loop {
                 if exit.load(Ordering::Relaxed) {
                     break;
                 }
                 let _ = broadcast(
+                    debug_id,
                     &crdt,
                     &window,
                     &recycler,

@@ -737,6 +769,7 @@ mod bench {
 #[cfg(test)]
 mod test {
     use crdt::{Crdt, TestNode};
+    use logger;
     use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
     use std::collections::VecDeque;
     use std::io;

@@ -821,6 +854,7 @@ mod test {
 
     #[test]
     pub fn window_send_test() {
+        logger::setup();
         let tn = TestNode::new();
         let exit = Arc::new(AtomicBool::new(false));
         let mut crdt_me = Crdt::new(tn.data.clone());
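The repair_window gate above, *times & (*times - 1) != 0, is the usual power-of-two test: repair requests only go out when the retry counter reaches 1, 2, 4, 8, 16, ..., i.e. exponential backoff between repair attempts. A tiny demonstration:

// Act only when `times` is a power of two (times & (times - 1) == 0),
// matching the early-return guard in repair_window above.
fn should_repair(times: u32) -> bool {
    times & (times - 1) == 0
}

fn main() {
    let fired: Vec<u32> = (1..=20).filter(|&t| should_repair(t)).collect();
    assert_eq!(fired, vec![1, 2, 4, 8, 16]);
    println!("repair fires on attempts {:?}", fired);
}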
@@ -241,6 +241,7 @@ mod tests {
     fn test_thin_client() {
         logger::setup();
         let leader = TestNode::new();
+        let leader_data = leader.data.clone();
 
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);

@@ -251,12 +252,7 @@ mod tests {
             bank,
             0,
             Some(Duration::from_millis(30)),
-            leader.data.clone(),
-            leader.sockets.requests,
-            leader.sockets.transaction,
-            leader.sockets.broadcast,
-            leader.sockets.respond,
-            leader.sockets.gossip,
+            leader,
             exit.clone(),
             sink(),
         );

@@ -266,9 +262,9 @@ mod tests {
         let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
 
         let mut client = ThinClient::new(
-            leader.data.requests_addr,
+            leader_data.requests_addr,
             requests_socket,
-            leader.data.transactions_addr,
+            leader_data.transactions_addr,
             transactions_socket,
         );
         let last_id = client.get_last_id();

@@ -291,17 +287,13 @@ mod tests {
         let bank = Bank::new(&alice);
         let bob_pubkey = KeyPair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
+        let leader_data = leader.data.clone();
 
         let server = FullNode::new_leader(
             bank,
             0,
             Some(Duration::from_millis(30)),
-            leader.data.clone(),
-            leader.sockets.requests,
-            leader.sockets.transaction,
-            leader.sockets.broadcast,
-            leader.sockets.respond,
-            leader.sockets.gossip,
+            leader,
             exit.clone(),
             sink(),
         );

@@ -313,9 +305,9 @@ mod tests {
             .unwrap();
         let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut client = ThinClient::new(
-            leader.data.requests_addr,
+            leader_data.requests_addr,
             requests_socket,
-            leader.data.transactions_addr,
+            leader_data.transactions_addr,
             transactions_socket,
         );
         let last_id = client.get_last_id();

@@ -349,17 +341,12 @@ mod tests {
         let bank = Bank::new(&alice);
         let bob_pubkey = KeyPair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
+        let leader_data = leader.data.clone();
         let server = FullNode::new_leader(
             bank,
             0,
             Some(Duration::from_millis(30)),
-            leader.data.clone(),
-            leader.sockets.requests,
-            leader.sockets.transaction,
-            leader.sockets.broadcast,
-            leader.sockets.respond,
-            leader.sockets.gossip,
+            leader,
             exit.clone(),
             sink(),
         );

@@ -371,9 +358,9 @@ mod tests {
             .unwrap();
         let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut client = ThinClient::new(
-            leader.data.requests_addr,
+            leader_data.requests_addr,
             requests_socket,
-            leader.data.transactions_addr,
+            leader_data.transactions_addr,
             transactions_socket,
         );
         let last_id = client.get_last_id();
@@ -1,57 +1,29 @@
 #[macro_use]
 extern crate log;
 extern crate bincode;
+extern crate serde_json;
 extern crate solana;
 
-use solana::bank::Bank;
 use solana::crdt::TestNode;
 use solana::crdt::{Crdt, ReplicatedData};
-use solana::fullnode::FullNode;
+use solana::entry_writer::EntryWriter;
+use solana::fullnode::{FullNode, LedgerFile};
 use solana::logger;
 use solana::mint::Mint;
 use solana::ncp::Ncp;
 use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
 use solana::streamer::default_window;
 use solana::thin_client::ThinClient;
-use std::io;
-use std::io::sink;
+use std::fs::File;
 use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread::sleep;
-use std::thread::JoinHandle;
 use std::time::Duration;
 
-fn validator(
-    leader: &ReplicatedData,
-    exit: Arc<AtomicBool>,
-    alice: &Mint,
-    threads: &mut Vec<JoinHandle<()>>,
-) {
-    let validator = TestNode::new();
-    let replicant_bank = Bank::new(&alice);
-    let mut ts = FullNode::new_validator(
-        replicant_bank,
-        0,
-        validator.data.clone(),
-        validator.sockets.requests,
-        validator.sockets.respond,
-        validator.sockets.replicate,
-        validator.sockets.gossip,
-        validator.sockets.repair,
-        leader.clone(),
-        exit.clone(),
-    );
-    threads.append(&mut ts.thread_hdls);
-}
-
-fn converge(
-    leader: &ReplicatedData,
-    exit: Arc<AtomicBool>,
-    num_nodes: usize,
-    threads: &mut Vec<JoinHandle<()>>,
-) -> (Vec<ReplicatedData>, PublicKey) {
+fn converge(leader: &ReplicatedData, num_nodes: usize) -> Vec<ReplicatedData> {
     //lets spy on the network
+    let exit = Arc::new(AtomicBool::new(false));
     let mut spy = TestNode::new();
     let daddr = "0.0.0.0:0".parse().unwrap();
     let me = spy.data.id.clone();

@@ -67,30 +39,49 @@ fn converge(
         spy_window,
         spy.sockets.gossip,
         spy.sockets.gossip_send,
-        exit,
+        exit.clone(),
     ).unwrap();
     //wait for the network to converge
     let mut converged = false;
+    let mut rv = vec![];
     for _ in 0..30 {
         let num = spy_ref.read().unwrap().convergence();
-        if num == num_nodes as u64 {
+        let mut v: Vec<ReplicatedData> = spy_ref
+            .read()
+            .unwrap()
+            .table
+            .values()
+            .into_iter()
+            .filter(|x| x.id != me)
+            .filter(|x| x.requests_addr != daddr)
+            .cloned()
+            .collect();
+        if num >= num_nodes as u64 && v.len() >= num_nodes {
+            rv.append(&mut v);
             converged = true;
             break;
         }
         sleep(Duration::new(1, 0));
     }
     assert!(converged);
-    threads.extend(dr.thread_hdls.into_iter());
-    let v: Vec<ReplicatedData> = spy_ref
-        .read()
-        .unwrap()
-        .table
-        .values()
-        .into_iter()
-        .filter(|x| x.id != me)
-        .map(|x| x.clone())
-        .collect();
-    (v.clone(), me)
+    exit.store(true, Ordering::Relaxed);
+    for t in dr.thread_hdls.into_iter() {
+        t.join().unwrap();
+    }
+    rv
+}
+
+fn genesis(num: i64) -> (Mint, String) {
+    let mint = Mint::new(num);
+    let id = {
+        let ids: Vec<_> = mint.pubkey().iter().map(|id| format!("{}", id)).collect();
+        ids.join("")
+    };
+    let path = format!("target/test_multi_node_dynamic_network-{}.log", id);
+    let mut writer = File::create(path.clone()).unwrap();
+
+    EntryWriter::write_entries(&mut writer, mint.create_entries()).unwrap();
+    (mint, path.to_string())
 }
 
 #[test]

@@ -99,34 +90,38 @@ fn test_multi_node_validator_catchup_from_zero() {
     const N: usize = 5;
     trace!("test_multi_node_validator_catchup_from_zero");
     let leader = TestNode::new();
-    let alice = Mint::new(10_000);
+    let leader_data = leader.data.clone();
     let bob_pubkey = KeyPair::new().pubkey();
     let exit = Arc::new(AtomicBool::new(false));
 
-    let leader_bank = Bank::new(&alice);
-    let server = FullNode::new_leader(
-        leader_bank,
-        0,
+    let (alice, ledger_path) = genesis(10_000);
+    let server = FullNode::new(
+        leader,
+        true,
+        LedgerFile::Path(ledger_path.clone()),
         None,
-        leader.data.clone(),
-        leader.sockets.requests,
-        leader.sockets.transaction,
-        leader.sockets.broadcast,
-        leader.sockets.respond,
-        leader.sockets.gossip,
+        LedgerFile::Sink,
         exit.clone(),
-        sink(),
     );
 
     let mut threads = server.thread_hdls;
     for _ in 0..N {
-        validator(&leader.data, exit.clone(), &alice, &mut threads);
+        let validator = TestNode::new();
+        let mut val = FullNode::new(
+            validator,
+            false,
+            LedgerFile::Path(ledger_path.clone()),
+            Some(leader_data.gossip_addr),
+            LedgerFile::NoFile,
+            exit.clone(),
+        );
+        threads.append(&mut val.thread_hdls);
     }
-    let (servers, spy_id0) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
+    let servers = converge(&leader_data, N + 1);
     //contains the leader addr as well
     assert_eq!(servers.len(), N + 1);
     //verify leader can do transfer
-    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
+    let leader_balance =
+        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap();
     assert_eq!(leader_balance, 500);
     //verify validator has the same balance
     let mut success = 0usize;

@@ -144,13 +139,23 @@ fn test_multi_node_validator_catchup_from_zero() {
 
     success = 0;
     // start up another validator, converge and then check everyone's balances
-    validator(&leader.data, exit.clone(), &alice, &mut threads);
-    let (servers, _) = converge(&leader.data, exit.clone(), N + 4, &mut threads);
+    let mut val = FullNode::new(
+        TestNode::new(),
+        false,
+        LedgerFile::Path(ledger_path.clone()),
+        Some(leader_data.gossip_addr),
+        LedgerFile::NoFile,
+        exit.clone(),
+    );
+    threads.append(&mut val.thread_hdls);
+    //contains the leader and new node
+    let servers = converge(&leader_data, N + 2);
 
-    let mut leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
+    let mut leader_balance =
+        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap();
     info!("leader balance {}", leader_balance);
     loop {
-        let mut client = mk_client(&leader.data);
+        let mut client = mk_client(&leader_data);
         leader_balance = client.poll_get_balance(&bob_pubkey).unwrap();
         if leader_balance == 1000 {
             break;

@@ -160,22 +165,20 @@ fn test_multi_node_validator_catchup_from_zero() {
     assert_eq!(leader_balance, 1000);
 
     for server in servers.iter() {
-        if server.id != spy_id0 {
-            let mut client = mk_client(server);
-            info!("1server: {:?}", server.id[0]);
-            for _ in 0..10 {
-                if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
-                    info!("validator balance {}", bal);
-                    if bal == leader_balance {
-                        success += 1;
-                        break;
-                    }
+        let mut client = mk_client(server);
+        info!("1server: {:?}", server.id[0]);
+        for _ in 0..10 {
+            if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
+                info!("validator balance {}", bal);
+                if bal == leader_balance {
+                    success += 1;
+                    break;
                 }
-                sleep(Duration::from_millis(500));
             }
+            sleep(Duration::from_millis(500));
         }
     }
-    assert_eq!(success, (servers.len() - 1));
+    assert_eq!(success, servers.len());
 
     exit.store(true, Ordering::Relaxed);
     for t in threads {

@@ -189,34 +192,36 @@ fn test_multi_node_basic() {
     const N: usize = 5;
     trace!("test_multi_node_basic");
     let leader = TestNode::new();
-    let alice = Mint::new(10_000);
+    let leader_data = leader.data.clone();
     let bob_pubkey = KeyPair::new().pubkey();
     let exit = Arc::new(AtomicBool::new(false));
-
-    let leader_bank = Bank::new(&alice);
-    let server = FullNode::new_leader(
-        leader_bank,
-        0,
+    let (alice, ledger_path) = genesis(10_000);
+    let server = FullNode::new(
+        leader,
+        true,
+        LedgerFile::Path(ledger_path.clone()),
         None,
-        leader.data.clone(),
-        leader.sockets.requests,
-        leader.sockets.transaction,
-        leader.sockets.broadcast,
-        leader.sockets.respond,
-        leader.sockets.gossip,
+        LedgerFile::Sink,
         exit.clone(),
-        sink(),
     );
+    let threads = server.thread_hdls;
 
-    let mut threads = server.thread_hdls;
     for _ in 0..N {
-        validator(&leader.data, exit.clone(), &alice, &mut threads);
+        let validator = TestNode::new();
+        FullNode::new(
+            validator,
+            false,
+            LedgerFile::Path(ledger_path.clone()),
+            Some(leader_data.gossip_addr),
+            LedgerFile::NoFile,
+            exit.clone(),
+        );
     }
-    let (servers, _) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
+    let servers = converge(&leader_data, N + 1);
     //contains the leader addr as well
     assert_eq!(servers.len(), N + 1);
     //verify leader can do transfer
-    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
+    let leader_balance =
+        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap();
     assert_eq!(leader_balance, 500);
     //verify validator has the same balance
     let mut success = 0usize;

@@ -235,6 +240,109 @@ fn test_multi_node_basic() {
     for t in threads {
         t.join().unwrap();
     }
+    std::fs::remove_file(ledger_path).unwrap();
+}
+
+#[test]
+fn test_multi_node_dynamic_network() {
+    logger::setup();
+    const N: usize = 3;
+    let leader = TestNode::new();
+    let bob_pubkey = KeyPair::new().pubkey();
+    let exit = Arc::new(AtomicBool::new(false));
+    let (alice, ledger_path) = genesis(100_000);
+    let leader_data = leader.data.clone();
+    let server = FullNode::new(
+        leader,
+        true,
+        LedgerFile::Path(ledger_path.clone()),
+        None,
+        LedgerFile::Sink,
+        exit.clone(),
+    );
+    let threads = server.thread_hdls;
+    let leader_balance =
+        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap();
+    assert_eq!(leader_balance, 500);
+    let leader_balance =
+        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(1000)).unwrap();
+    assert_eq!(leader_balance, 1000);
+
+    let mut vals: Vec<(ReplicatedData, Arc<AtomicBool>, FullNode)> = (0..N)
+        .into_iter()
+        .map(|_| {
+            let exit = Arc::new(AtomicBool::new(false));
+            let validator = TestNode::new();
+            let rd = validator.data.clone();
+            let val = FullNode::new(
+                validator,
+                false,
+                LedgerFile::Path(ledger_path.clone()),
+                Some(leader_data.gossip_addr),
+                LedgerFile::NoFile,
+                exit.clone(),
+            );
+            (rd, exit, val)
+        })
+        .collect();
+    for i in 0..N {
+        //verify leader can do transfer
+        let expected = ((i + 3) * 500) as i64;
+        let leader_balance =
+            send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(expected))
+                .unwrap();
+        assert_eq!(leader_balance, expected);
+        //verify all validators have the same balance
+        let mut success = 0usize;
+        for server in vals.iter() {
+            let mut client = mk_client(&server.0);
+            let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(expected));
+            info!("{:x} {} get_balance: {:?}", server.0.debug_id(), i, getbal);
+            if let Some(bal) = getbal {
+                if bal == leader_balance {
+                    success += 1;
+                }
+            }
+        }
+        info!("SUCCESS {} out of {}", success, vals.len());
+        // this should be almost true, or at least vals.len() - 1 while the other node catches up
+        //assert!(success == vals.len());
+        //kill a validator
+        vals[i].1.store(true, Ordering::Relaxed);
+        let mut ts = vec![];
+        ts.append(&mut vals[i].2.thread_hdls);
+        for t in ts.into_iter() {
+            t.join().unwrap();
+        }
+        info!("{:x} KILLED", vals[i].0.debug_id());
+        //add a new one
+        vals[i] = {
+            let exit = Arc::new(AtomicBool::new(false));
+            let validator = TestNode::new();
+            let rd = validator.data.clone();
+            let val = FullNode::new(
+                validator,
+                false,
+                LedgerFile::Path(ledger_path.clone()),
+                Some(leader_data.gossip_addr),
+                LedgerFile::NoFile,
+                exit.clone(),
+            );
+            info!("{:x} ADDED", rd.debug_id());
+            (rd, exit, val)
+        };
+    }
+    for (_, exit, val) in vals.into_iter() {
+        exit.store(true, Ordering::Relaxed);
+        for t in val.thread_hdls {
+            t.join().unwrap();
+        }
+    }
+    exit.store(true, Ordering::Relaxed);
+    for t in threads {
+        t.join().unwrap();
+    }
+    std::fs::remove_file(ledger_path).unwrap();
 }
 
 fn mk_client(leader: &ReplicatedData) -> ThinClient {

@@ -243,7 +351,9 @@ fn mk_client(leader: &ReplicatedData) -> ThinClient {
         .set_read_timeout(Some(Duration::new(1, 0)))
         .unwrap();
     let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
+    let daddr = "0.0.0.0:0".parse().unwrap();
+    assert!(leader.requests_addr != daddr);
+    assert!(leader.transactions_addr != daddr);
     ThinClient::new(
         leader.requests_addr,
         requests_socket,

@@ -252,11 +362,31 @@ fn mk_client(leader: &ReplicatedData) -> ThinClient {
     )
 }
 
-fn tx_and_retry_get_balance(
+fn retry_get_balance(
+    client: &mut ThinClient,
+    bob_pubkey: &PublicKey,
+    expected: Option<i64>,
+) -> Option<i64> {
+    for _ in 0..10 {
+        let out = client.poll_get_balance(bob_pubkey);
+        if expected.is_none() {
+            return out.ok().clone();
+        }
+        if let (Some(e), Ok(o)) = (expected, out) {
+            if o == e {
+                return Some(o);
+            }
+        }
+    }
+    None
+}
+
+fn send_tx_and_retry_get_balance(
     leader: &ReplicatedData,
     alice: &Mint,
     bob_pubkey: &PublicKey,
-) -> io::Result<i64> {
+    expected: Option<i64>,
+) -> Option<i64> {
     let mut client = mk_client(leader);
     trace!("getting leader last_id");
     let last_id = client.get_last_id();

@@ -264,5 +394,5 @@ fn tx_and_retry_get_balance(
     let _sig = client
         .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
         .unwrap();
-    client.poll_get_balance(bob_pubkey)
+    retry_get_balance(&mut client, bob_pubkey, expected)
 }