Fix review comments

parent cc68ecdacf
commit 4149f7fd1c
@@ -218,7 +218,9 @@ fn main() {
     let (keypair, gossip) = parse_identity(&matches);
     let ledger_path = matches.value_of("ledger").unwrap();
     if let Some(paths) = matches.value_of("accounts") {
-        fullnode_config.account_paths = paths.to_string();
+        fullnode_config.account_paths = Some(paths.to_string());
+    } else {
+        fullnode_config.account_paths = None;
     }
     let cluster_entrypoint = matches
         .value_of("network")

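The branch above fills the new Option<String> field from clap's optional --accounts value. A minimal, self-contained sketch of the same mapping (the helper name is illustrative, not part of the patch):

fn account_paths_from_arg(arg: Option<&str>) -> Option<String> {
    // Absent flag => None; present flag => owned String.
    arg.map(ToString::to_string)
}

fn main() {
    assert_eq!(
        account_paths_from_arg(Some("/mnt/accounts")),
        Some("/mnt/accounts".to_string())
    );
    assert_eq!(account_paths_from_arg(None), None);
}
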
@@ -254,17 +254,19 @@ impl AccountsDB {
             .unwrap()
             .iter()
             .for_each(|p| {
-                if let Some(entry) = self.index_info.index.read().unwrap().get(p) {
-                    let forks = entry.0.read().unwrap();
-                    if let Some((id, index)) = forks.get(&fork) {
-                        accounts.push(
-                            self.storage.read().unwrap()[*id]
-                                .appendvec
-                                .read()
-                                .unwrap()
-                                .get_account(*index)
-                                .unwrap(),
-                        );
+                if let Some(map) = self.index_info.index.read().unwrap().get(p) {
+                    let forks = map.0.read().unwrap();
+                    if let Some((id, offset)) = forks.get(&fork) {
+                        accounts.push(self.get_account(*id, *offset));
+                    } else {
+                        let fork_info = self.fork_info.read().unwrap();
+                        if let Some(info) = fork_info.get(&fork) {
+                            for parent_fork in info.parents.iter() {
+                                if let Some((id, offset)) = forks.get(&parent_fork) {
+                                    accounts.push(self.get_account(*id, *offset));
+                                }
+                            }
+                        }
                     }
                 }
             });

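The new branch repeats the lookup pattern used throughout this patch: check the requested fork first, otherwise walk the fork's recorded parents. A small sketch of that walk, returning the first match the way the load hunks below do, with plain HashMaps standing in for the RwLock-wrapped index (all names are illustrative):

use std::collections::HashMap;

type Fork = u64;

// forks: fork -> (appendvec id, offset); parents: fork -> ordered ancestor list.
fn lookup(
    forks: &HashMap<Fork, (usize, u64)>,
    parents: &HashMap<Fork, Vec<Fork>>,
    fork: Fork,
) -> Option<(usize, u64)> {
    if let Some(loc) = forks.get(&fork) {
        return Some(*loc);
    }
    for parent in parents.get(&fork)? {
        if let Some(loc) = forks.get(parent) {
            return Some(*loc);
        }
    }
    None
}

fn main() {
    let mut forks = HashMap::new();
    forks.insert(0, (0, 16)); // the account was stored while on fork 0
    let mut parents = HashMap::new();
    parents.insert(2, vec![1, 0]); // fork 2 descends from forks 1 and 0
    assert_eq!(lookup(&forks, &parents, 0), Some((0, 16)));
    assert_eq!(lookup(&forks, &parents, 2), Some((0, 16))); // found via parent 0
    assert_eq!(lookup(&forks, &parents, 5), None); // unknown fork
}
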
@@ -305,10 +307,10 @@ impl AccountsDB {
         Some(hash(&serialize(&ordered_accounts).unwrap()))
     }

-    fn get_account(&self, id: AppendVecId, offset: u64) -> Option<Account> {
+    fn get_account(&self, id: AppendVecId, offset: u64) -> Account {
         let appendvec = &self.storage.read().unwrap()[id].appendvec;
         let av = appendvec.read().unwrap();
-        Some(av.get_account(offset).unwrap())
+        av.get_account(offset).unwrap()
     }

     fn load(&self, fork: Fork, pubkey: &Pubkey, walk_back: bool) -> Option<Account> {

@@ -317,7 +319,7 @@ impl AccountsDB {
             let forks = map.0.read().unwrap();
             // find most recent fork that is an ancestor of current_fork
             if let Some((id, offset)) = forks.get(&fork) {
-                return self.get_account(*id, *offset);
+                return Some(self.get_account(*id, *offset));
             } else {
                 if !walk_back {
                     return None;

@@ -326,7 +328,7 @@ impl AccountsDB {
                 if let Some(info) = fork_info.get(&fork) {
                     for parent_fork in info.parents.iter() {
                         if let Some((id, offset)) = forks.get(&parent_fork) {
-                            return self.get_account(*id, *offset);
+                            return Some(self.get_account(*id, *offset));
                         }
                     }
                 }

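With get_account now returning Account directly, the Option lives only in the fork-aware load path, which wraps the hit in Some(...) as above. A compact sketch of that split, using illustrative types rather than the crate's real ones:

#[derive(Clone, Debug, PartialEq)]
struct Account {
    tokens: u64,
}

struct Store {
    accounts: Vec<Account>,
}

impl Store {
    // Infallible: offsets handed out by the index are assumed valid, so a bad
    // one is a bug and panics here instead of bubbling an Option to every caller.
    fn get_account(&self, offset: usize) -> Account {
        self.accounts[offset].clone()
    }

    // Fallible: a pubkey may simply be absent from the requested fork.
    fn load(&self, offset: Option<usize>) -> Option<Account> {
        offset.map(|o| self.get_account(o))
    }
}

fn main() {
    let store = Store {
        accounts: vec![Account { tokens: 10 }],
    };
    assert_eq!(store.load(Some(0)), Some(Account { tokens: 10 }));
    assert_eq!(store.load(None), None);
}
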
@@ -689,7 +691,7 @@ impl AccountsDB {
                     keys.push(pubkey.clone());
                 }
             }
-            let account = self.get_account(id, offset).unwrap();
+            let account = self.get_account(id, offset);
             if account.tokens == 0 {
                 if self.remove_account_entries(&[fork], &map) {
                     keys.push(pubkey.clone());

@@ -737,11 +739,11 @@ impl Accounts {
         paths
     }

-    pub fn new(fork: Fork, in_paths: &str) -> Self {
-        let paths = if !in_paths.is_empty() {
-            in_paths.to_string()
-        } else {
+    pub fn new(fork: Fork, in_paths: Option<String>) -> Self {
+        let paths = if in_paths.is_none() {
             Self::make_default_paths()
+        } else {
+            in_paths.unwrap()
         };
         let accounts_db = AccountsDB::new(fork, &paths);
         accounts_db.add_fork(fork, None);

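The is_none()/unwrap() pair above can also be written with unwrap_or_else; a small equivalent sketch, with make_default_paths standing in for the real helper:

// Stand-in for the real default-path helper.
fn make_default_paths() -> String {
    "accounts".to_string()
}

fn resolve_paths(in_paths: Option<String>) -> String {
    in_paths.unwrap_or_else(make_default_paths)
}

fn main() {
    assert_eq!(resolve_paths(None), "accounts");
    assert_eq!(
        resolve_paths(Some("/data/a,/data/b".to_string())),
        "/data/a,/data/b"
    );
}
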
@@ -932,7 +934,7 @@ mod tests {
         ka: &Vec<(Pubkey, Account)>,
         error_counters: &mut ErrorCounters,
     ) -> Vec<Result<(InstructionAccounts, InstructionLoaders)>> {
-        let accounts = Accounts::new(0, "");
+        let accounts = Accounts::new(0, None);
         for ka in ka.iter() {
             accounts.store_slow(0, true, &ka.0, &ka.1);
         }

@@ -1373,7 +1375,7 @@ mod tests {
         // original account
         assert_eq!(db0.load(1, &key, true), Some(account1));

-        let mut accounts1 = Accounts::new(3, "");
+        let mut accounts1 = Accounts::new(3, None);
         accounts1.accounts_db = db0;
         assert_eq!(accounts1.load_slow(1, &key), None);
         assert_eq!(accounts1.load_slow(0, &key), Some(account0));

@@ -160,35 +160,43 @@ where
         }

         let mut at = data_at as usize;
+        let data = &self.map[at..at + mem::size_of::<u64>()];
+        #[allow(clippy::cast_ptr_alignment)]
+        let ptr = data.as_ptr() as *mut u64;
         unsafe {
-            let data = &self.map[at..at + mem::size_of::<u64>()];
-            #[allow(clippy::cast_ptr_alignment)]
-            let ptr = data.as_ptr() as *mut u64;
             std::ptr::write_unaligned(ptr, len as u64);
-            at += mem::size_of::<u64>();
+        }
+        at += mem::size_of::<u64>();

-            let data = &self.map[at..at + mem::size_of::<u64>()];
-            #[allow(clippy::cast_ptr_alignment)]
-            let ptr = data.as_ptr() as *mut u64;
+        let data = &self.map[at..at + mem::size_of::<u64>()];
+        #[allow(clippy::cast_ptr_alignment)]
+        let ptr = data.as_ptr() as *mut u64;
+        unsafe {
             std::ptr::write_unaligned(ptr, account.tokens);
-            at += mem::size_of::<u64>();
+        }
+        at += mem::size_of::<u64>();

-            let data = &self.map[at..at + account.userdata.len()];
-            let dst = data.as_ptr() as *mut u8;
-            let data = &account.userdata[0..account.userdata.len()];
-            let src = data.as_ptr();
+        let data = &self.map[at..at + account.userdata.len()];
+        let dst = data.as_ptr() as *mut u8;
+        let data = &account.userdata[0..account.userdata.len()];
+        let src = data.as_ptr();
+        unsafe {
             std::ptr::copy_nonoverlapping(src, dst, account.userdata.len());
-            at += account.userdata.len();
+        }
+        at += account.userdata.len();

-            let data = &self.map[at..at + mem::size_of::<Pubkey>()];
-            let ptr = data.as_ptr() as *mut Pubkey;
+        let data = &self.map[at..at + mem::size_of::<Pubkey>()];
+        let ptr = data.as_ptr() as *mut Pubkey;
+        unsafe {
             std::ptr::write(ptr, account.owner);
-            at += mem::size_of::<Pubkey>();
+        }
+        at += mem::size_of::<Pubkey>();

-            let data = &self.map[at..at + mem::size_of::<bool>()];
-            let ptr = data.as_ptr() as *mut bool;
+        let data = &self.map[at..at + mem::size_of::<bool>()];
+        let ptr = data.as_ptr() as *mut bool;
+        unsafe {
             std::ptr::write(ptr, account.executable);
-        };
+        }

         self.current_offset
             .fetch_add(len + SIZEOF_U64, Ordering::Relaxed);

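The reshuffled version keeps only the raw write inside unsafe and does the slice, pointer, and offset bookkeeping outside it. A self-contained sketch of that shape against a plain byte buffer (the real code writes into a shared memory map; the sketch takes &mut [u8] to stay within safe aliasing rules):

use std::mem;

// Build the pointer outside `unsafe`, keep only the raw write inside it,
// and advance the offset afterwards.
fn write_u64(buf: &mut [u8], at: &mut usize, value: u64) {
    let ptr = buf[*at..*at + mem::size_of::<u64>()].as_mut_ptr() as *mut u64;
    unsafe {
        std::ptr::write_unaligned(ptr, value);
    }
    *at += mem::size_of::<u64>();
}

fn main() {
    let mut buf = vec![0u8; 16];
    let mut at = 0;
    write_u64(&mut buf, &mut at, 42);
    write_u64(&mut buf, &mut at, 7);
    assert_eq!(at, 16);
    assert_eq!(u64::from_ne_bytes(buf[0..8].try_into().unwrap()), 42);
    assert_eq!(u64::from_ne_bytes(buf[8..16].try_into().unwrap()), 7);
}
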
@@ -115,12 +115,12 @@ pub struct Bank {

 impl Bank {
     pub fn new(genesis_block: &GenesisBlock) -> Self {
-        Self::new_with_paths(&genesis_block, "")
+        Self::new_with_paths(&genesis_block, None)
     }

-    pub fn new_with_paths(genesis_block: &GenesisBlock, paths: &str) -> Self {
+    pub fn new_with_paths(genesis_block: &GenesisBlock, paths: Option<String>) -> Self {
         let mut bank = Self::default();
-        bank.accounts = Some(Arc::new(Accounts::new(bank.id, &paths)));
+        bank.accounts = Some(Arc::new(Accounts::new(bank.id, paths)));
         bank.process_genesis_block(genesis_block);
         bank.add_builtin_programs();
         bank

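Accounts::new now takes the Option<String> by value, so new_with_paths forwards paths instead of borrowing it. A tiny sketch of why the borrow had to go (illustrative types only):

struct Accounts {
    paths: String,
}

impl Accounts {
    fn new(in_paths: Option<String>) -> Self {
        Accounts {
            paths: in_paths.unwrap_or_else(|| "default".to_string()),
        }
    }
}

fn main() {
    let paths: Option<String> = Some("/data/accounts".to_string());
    // Accounts::new(&paths) would not compile: the parameter is Option<String>,
    // not &Option<String>, so ownership is moved instead.
    let accounts = Accounts::new(paths);
    assert_eq!(accounts.paths, "/data/accounts");
}
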
@@ -151,7 +151,11 @@ impl Bank {
     pub fn new_from_parent(parent: &Arc<Bank>) -> Self {
         static BANK_ID: AtomicUsize = AtomicUsize::new(1);
         let collector_id = parent.collector_id;
-        Self::new_from_parent_and_id(parent, collector_id, BANK_ID.fetch_add(1, Ordering::Relaxed) as u64)
+        Self::new_from_parent_and_id(
+            parent,
+            collector_id,
+            BANK_ID.fetch_add(1, Ordering::Relaxed) as u64,
+        )
     }

     pub fn id(&self) -> u64 {

@@ -107,7 +107,7 @@ pub struct BankForksInfo {
 pub fn process_blocktree(
     genesis_block: &GenesisBlock,
     blocktree: &Blocktree,
-    account_paths: &str,
+    account_paths: Option<String>,
 ) -> Result<(BankForks, Vec<BankForksInfo>)> {
     let now = Instant::now();
     info!("processing ledger...");

@@ -298,7 +298,7 @@ mod tests {
         fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_id);

         let (mut _bank_forks, bank_forks_info) =
-            process_blocktree(&genesis_block, &blocktree, "").unwrap();
+            process_blocktree(&genesis_block, &blocktree, None).unwrap();

         assert_eq!(bank_forks_info.len(), 1);
         assert_eq!(

@@ -357,7 +357,7 @@ mod tests {
         info!("last_fork2_entry_id: {:?}", last_fork2_entry_id);

         let (mut bank_forks, bank_forks_info) =
-            process_blocktree(&genesis_block, &blocktree, "").unwrap();
+            process_blocktree(&genesis_block, &blocktree, None).unwrap();

         assert_eq!(bank_forks_info.len(), 2); // There are two forks
         assert_eq!(

@@ -475,7 +475,7 @@ mod tests {
             Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
         blocktree.write_entries(1, 0, 0, &entries).unwrap();
         let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
-        let (bank_forks, bank_forks_info) = process_blocktree(&genesis_block, &blocktree, "").unwrap();
+        let (bank_forks, bank_forks_info) = process_blocktree(&genesis_block, &blocktree, None).unwrap();

         assert_eq!(bank_forks_info.len(), 1);
         assert_eq!(

@@ -71,7 +71,7 @@ pub struct FullnodeConfig {
     pub blockstream: Option<String>,
     pub storage_rotate_count: u64,
     pub tick_config: PohServiceConfig,
-    pub account_paths: String,
+    pub account_paths: Option<String>,
 }
 impl Default for FullnodeConfig {
     fn default() -> Self {

@@ -85,7 +85,7 @@ impl Default for FullnodeConfig {
             blockstream: None,
             storage_rotate_count: NUM_HASHES_FOR_STORAGE_ROTATE,
             tick_config: PohServiceConfig::default(),
-            account_paths: "".to_string(),
+            account_paths: None,
         }
     }
 }

@@ -125,7 +125,7 @@ impl Fullnode {
         assert_eq!(id, node.info.id);

         let (mut bank_forks, bank_forks_info, blocktree, ledger_signal_receiver) =
-            new_banks_from_blocktree(ledger_path, &config.account_paths);
+            new_banks_from_blocktree(ledger_path, config.account_paths.clone());

         let exit = Arc::new(AtomicBool::new(false));
         let bank_info = &bank_forks_info[0];

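Because the fullnode only borrows its config here, handing the Option<String> to new_banks_from_blocktree by value requires the .clone() seen above. A minimal sketch of that call shape (names are illustrative):

#[derive(Clone)]
struct Config {
    account_paths: Option<String>,
}

fn new_banks(_ledger_path: &str, account_paths: Option<String>) -> usize {
    // Stand-in for the real constructor: just count the configured paths.
    account_paths.map_or(0, |p| p.split(',').count())
}

fn start(config: &Config) -> usize {
    // config is behind a shared reference, so the Option<String> is cloned
    // to give the callee ownership.
    new_banks("ledger", config.account_paths.clone())
}

fn main() {
    let config = Config {
        account_paths: Some("/a,/b".to_string()),
    };
    assert_eq!(start(&config), 2);
}
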
@@ -407,7 +407,7 @@ impl Fullnode {

 pub fn new_banks_from_blocktree(
     blocktree_path: &str,
-    account_paths: &str,
+    account_paths: Option<String>,
 ) -> (BankForks, Vec<BankForksInfo>, Blocktree, Receiver<bool>) {
     let genesis_block =
         GenesisBlock::load(blocktree_path).expect("Expected to successfully open genesis block");

@@ -746,7 +746,7 @@ mod tests {

         // Close the validator so that rocksdb has locks available
         validator_exit();
-        let (bank_forks, bank_forks_info, _, _) = new_banks_from_blocktree(&validator_ledger_path, "");
+        let (bank_forks, bank_forks_info, _, _) = new_banks_from_blocktree(&validator_ledger_path, None);
         let bank = bank_forks.working_bank();
         let entry_height = bank_forks_info[0].entry_height;

@@ -499,7 +499,7 @@ mod test {
         let (to_leader_sender, _to_leader_receiver) = channel();
         {
             let (bank_forks, bank_forks_info, blocktree, l_receiver) =
-                new_banks_from_blocktree(&my_ledger_path, "");
+                new_banks_from_blocktree(&my_ledger_path, None);
             let bank = bank_forks.working_bank();
             let last_entry_id = bank_forks_info[0].last_entry_id;

@@ -139,7 +139,7 @@ impl Replicator {
             GenesisBlock::load(ledger_path).expect("Expected to successfully open genesis block");

         let (bank_forks, _bank_forks_info) =
-            blocktree_processor::process_blocktree(&genesis_block, &blocktree, "")
+            blocktree_processor::process_blocktree(&genesis_block, &blocktree, None)
                 .expect("process_blocktree failed");

         let blocktree = Arc::new(blocktree);

@@ -911,7 +911,7 @@ fn test_leader_to_validator_transition() {
     leader_exit();

     info!("Check the ledger to make sure it's the right height...");
-    let bank_forks = new_banks_from_blocktree(&leader_ledger_path, "").0;
+    let bank_forks = new_banks_from_blocktree(&leader_ledger_path, None).0;
     let _bank = bank_forks.working_bank();

     remove_dir_all(leader_ledger_path).unwrap();