ledger-tool can now load a ledger snapshot (#6729)

Michael Vines 2019-11-04 22:14:55 -07:00 committed by GitHub
parent b825d04597
commit fba1af6ea9
8 changed files with 126 additions and 60 deletions
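For orientation, here is a minimal usage sketch of the verify subcommand extended below. The binary name solana-ledger-tool and its --ledger flag are assumptions not shown in this diff; the verify flags are the ones added by this commit, and the slot value is purely illustrative:

    # Replay starting from a local snapshot if one is present (new default), halting early
    solana-ledger-tool --ledger <ledger-dir> verify --halt-at-slot 12345

    # Force a full replay from genesis and skip PoH verification
    solana-ledger-tool --ledger <ledger-dir> verify --no-snapshot --skip-poh-verify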

View File

@@ -216,7 +216,7 @@ maybe_deploy_software() {
(
echo "--- net.sh restart"
set -x
time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-ledger-verify "$arg"
time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-poh-verify "$arg"
) || ok=false
if ! $ok; then
net/net.sh logs

View File

@@ -118,7 +118,7 @@ impl Validator {
voting_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
entrypoint_info_option: Option<&ContactInfo>,
verify_ledger: bool,
poh_verify: bool,
config: &ValidatorConfig,
) -> Self {
let id = keypair.pubkey();
@@ -171,7 +171,7 @@ impl Validator {
ledger_path,
config.account_paths.clone(),
config.snapshot_config.clone(),
verify_ledger,
poh_verify,
config.dev_halt_at_slot,
);
@@ -452,7 +452,7 @@ pub fn new_banks_from_blocktree(
blocktree_path: &Path,
account_paths: Option<String>,
snapshot_config: Option<SnapshotConfig>,
verify_ledger: bool,
poh_verify: bool,
dev_halt_at_slot: Option<Slot>,
) -> (
Hash,
@@ -488,7 +488,7 @@ pub fn new_banks_from_blocktree(
Blocktree::open_with_signal(blocktree_path).expect("Failed to open ledger database");
let process_options = blocktree_processor::ProcessOptions {
verify_ledger,
poh_verify,
dev_halt_at_slot,
..blocktree_processor::ProcessOptions::default()
};

View File

@@ -1,6 +1,8 @@
use clap::{crate_description, crate_name, crate_version, value_t_or_exit, App, Arg, SubCommand};
use clap::{
crate_description, crate_name, crate_version, value_t, value_t_or_exit, App, Arg, SubCommand,
};
use solana_ledger::{
bank_forks_utils, blocktree::Blocktree, blocktree_processor,
bank_forks::SnapshotConfig, bank_forks_utils, blocktree::Blocktree, blocktree_processor,
rooted_slot_iterator::RootedSlotIterator,
};
use solana_sdk::{clock::Slot, genesis_block::GenesisBlock};
@@ -71,7 +73,7 @@ fn output_ledger(blocktree: Blocktree, starting_slot: Slot, method: LedgerOutput
fn main() {
const DEFAULT_ROOT_COUNT: &str = "1";
solana_logger::setup();
solana_logger::setup_with_filter("solana=info");
let starting_slot_arg = Arg::with_name("starting_slot")
.long("starting-slot")
@@ -103,8 +105,37 @@ fn main() {
))
.subcommand(SubCommand::with_name("bounds").about("Print lowest and highest non-empty slots. Note: This ignores gaps in slots"))
.subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format").arg(&starting_slot_arg))
.subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
.subcommand(SubCommand::with_name("prune").about("Prune the ledger at the block height").arg(
.subcommand(
SubCommand::with_name("verify")
.about("Verify the ledger's PoH")
.arg(
Arg::with_name("no_snapshot")
.long("no-snapshot")
.takes_value(false)
.help("Do not start from a local snapshot if present"),
)
.arg(
Arg::with_name("account_paths")
.long("accounts")
.value_name("PATHS")
.takes_value(true)
.help("Comma separated persistent accounts location"),
)
.arg(
Arg::with_name("halt_at_slot")
.long("halt-at-slot")
.value_name("SLOT")
.takes_value(true)
.help("Halt processing at the given slot"),
)
.arg(
clap::Arg::with_name("skip_poh_verify")
.long("skip-poh-verify")
.takes_value(false)
.help("Skip ledger PoH verification"),
)
).subcommand(SubCommand::with_name("prune").about("Prune the ledger at the block height").arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
@@ -169,15 +200,42 @@ fn main() {
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
output_ledger(blocktree, starting_slot, LedgerOutputMethod::Json);
}
("verify", _) => {
("verify", Some(arg_matches)) => {
println!("Verifying ledger...");
let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
let poh_verify = !arg_matches.is_present("skip_poh_verify");
let snapshot_config = if arg_matches.is_present("no_snapshot") {
None
} else {
Some(SnapshotConfig {
snapshot_interval_slots: 0, // Value doesn't matter
snapshot_package_output_path: ledger_path.clone(),
snapshot_path: ledger_path.clone().join("snapshot"),
})
};
let account_paths = if let Some(account_paths) = matches.value_of("account_paths") {
Some(account_paths.to_string())
} else {
Some(ledger_path.join("accounts").to_str().unwrap().to_string())
};
let process_options = blocktree_processor::ProcessOptions {
verify_ledger: true,
poh_verify,
dev_halt_at_slot,
..blocktree_processor::ProcessOptions::default()
};
match bank_forks_utils::load(&genesis_block, &blocktree, None, None, process_options) {
Ok((_bank_forks, bank_forks_info, _leader_schedule_cache)) => {
println!("{:?}", bank_forks_info);
match bank_forks_utils::load(
&genesis_block,
&blocktree,
account_paths,
snapshot_config.as_ref(),
process_options,
) {
Ok((_bank_forks, _bank_forks_info, _leader_schedule_cache)) => {
println!("Ok");
}
Err(err) => {
eprintln!("Ledger verification failed: {:?}", err);
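The defaults resolved in this match arm anchor everything to the ledger directory: snapshot state lives in <ledger-dir>/snapshot, snapshot archives are looked up in <ledger-dir> itself, and accounts fall back to <ledger-dir>/accounts unless --accounts is passed. A hedged sketch of both modes (same assumed binary name and --ledger flag as above; paths illustrative):

    # Use the snapshot and accounts stored alongside the ledger
    solana-ledger-tool --ledger <ledger-dir> verify

    # Read persistent accounts from other volumes (comma separated, per the --accounts help)
    solana-ledger-tool --ledger <ledger-dir> verify --accounts /mnt/a/accounts,/mnt/b/accounts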

View File

@@ -1,29 +1,32 @@
use crate::bank_forks::BankForks;
use crate::block_error::BlockError;
use crate::blocktree::Blocktree;
use crate::blocktree_meta::SlotMeta;
use crate::entry::{create_ticks, Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::{
bank_forks::BankForks,
block_error::BlockError,
blocktree::Blocktree,
blocktree_meta::SlotMeta,
entry::{create_ticks, Entry, EntrySlice},
leader_schedule_cache::LeaderScheduleCache,
};
use itertools::Itertools;
use log::*;
use rand::seq::SliceRandom;
use rand::thread_rng;
use rayon::prelude::*;
use rayon::ThreadPool;
use rand::{seq::SliceRandom, thread_rng};
use rayon::{prelude::*, ThreadPool};
use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
use solana_runtime::bank::Bank;
use solana_runtime::transaction_batch::TransactionBatch;
use solana_sdk::clock::{Slot, MAX_RECENT_BLOCKHASHES};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::duration_as_ms;
use solana_sdk::transaction::Result;
use std::result;
use std::sync::Arc;
use std::time::{Duration, Instant};
use solana_rayon_threadlimit::get_thread_count;
use std::cell::RefCell;
use solana_runtime::{bank::Bank, transaction_batch::TransactionBatch};
use solana_sdk::{
clock::{Slot, MAX_RECENT_BLOCKHASHES},
genesis_block::GenesisBlock,
hash::Hash,
signature::{Keypair, KeypairUtil},
timing::duration_as_ms,
transaction::Result,
};
use std::{
cell::RefCell,
result,
sync::Arc,
time::{Duration, Instant},
};
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
@@ -193,7 +196,7 @@ pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>;
#[derive(Default)]
pub struct ProcessOptions {
pub verify_ledger: bool,
pub poh_verify: bool,
pub full_leader_cache: bool,
pub dev_halt_at_slot: Option<Slot>,
pub entry_callback: Option<ProcessCallback>,
@@ -279,9 +282,14 @@ pub fn process_blocktree_from_root(
};
info!(
"processing ledger...complete in {}ms, forks={}...",
"ledger processed in {}ms. {} fork{} at {}",
duration_as_ms(&now.elapsed()),
bank_forks_info.len(),
if bank_forks_info.len() > 1 { "s" } else { "" },
bank_forks_info
.iter()
.map(|bfi| bfi.bank_slot.to_string())
.join(", ")
);
Ok((bank_forks, bank_forks_info, leader_schedule_cache))
@@ -295,7 +303,7 @@ fn verify_and_process_slot_entries(
) -> result::Result<Hash, BlocktreeProcessorError> {
assert!(!entries.is_empty());
if opts.verify_ledger {
if opts.poh_verify {
let next_bank_tick_height = bank.tick_height() + entries.tick_count();
let max_bank_tick_height = bank.max_tick_height();
if next_bank_tick_height != max_bank_tick_height {
@@ -569,7 +577,7 @@ pub mod tests {
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
assert_eq!(
@@ -609,7 +617,7 @@ pub mod tests {
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
assert_eq!(
@@ -660,7 +668,7 @@ pub mod tests {
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
assert_eq!(
@@ -726,7 +734,7 @@ pub mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
let (mut _bank_forks, bank_forks_info, _) =
@@ -788,7 +796,7 @@ pub mod tests {
blocktree.set_roots(&[0, 1, 4]).unwrap();
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, _) =
@@ -862,7 +870,7 @@ pub mod tests {
blocktree.set_roots(&[0, 1]).unwrap();
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, _) =
@@ -942,7 +950,7 @@ pub mod tests {
// Check that we can properly restart the ledger / leader scheduler doesn't fail
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, _) =
@@ -1089,7 +1097,7 @@ pub mod tests {
)
.unwrap();
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, _) =
@@ -1118,7 +1126,7 @@ pub mod tests {
let blocktree = Blocktree::open(&ledger_path).unwrap();
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, _) =
@@ -1857,7 +1865,7 @@ pub mod tests {
// Set up bank1
let bank0 = Arc::new(Bank::new(&genesis_block));
let opts = ProcessOptions {
verify_ledger: true,
poh_verify: true,
..ProcessOptions::default()
};
process_bank_0(&bank0, &blocktree, &opts).unwrap();

View File

@@ -34,7 +34,7 @@ while [[ -n $1 ]]; do
elif [[ $1 = --limit-ledger-size ]]; then
args+=("$1")
shift
elif [[ $1 = --skip-ledger-verify ]]; then
elif [[ $1 = --skip-poh-verify ]]; then
args+=("$1")
shift
else

View File

@@ -98,7 +98,7 @@ while [[ -n $1 ]]; do
elif [[ $1 = --limit-ledger-size ]]; then
args+=("$1")
shift
elif [[ $1 = --skip-ledger-verify ]]; then
elif [[ $1 = --skip-poh-verify ]]; then
args+=("$1")
shift
elif [[ $1 = --no-snapshot-fetch ]]; then
@@ -140,7 +140,7 @@ while [[ -n $1 ]]; do
elif [[ $1 = --limit-ledger-size ]]; then
args+=("$1")
shift
elif [[ $1 = --skip-ledger-verify ]]; then
elif [[ $1 = --skip-poh-verify ]]; then
args+=("$1")
shift
elif [[ $1 = -h ]]; then

View File

@@ -72,7 +72,7 @@ Operate a configured testnet
in genesis block for external nodes
--no-snapshot-fetch
- If set, disables booting validators from a snapshot
--skip-ledger-verify
--skip-poh-verify
- If set, validators will skip verifying
the ledger they already have saved to disk at
boot (results in a much faster boot)
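As a usage example of the renamed option (mirroring the CI invocation updated earlier in this commit; the channel/tag value is illustrative):

    net/net.sh restart --skip-setup -t edge --skip-poh-verify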
@@ -176,7 +176,7 @@ while [[ -n $1 ]]; do
elif [[ $1 = --limit-ledger-size ]]; then
maybeLimitLedgerSize="$1"
shift 1
elif [[ $1 = --skip-ledger-verify ]]; then
elif [[ $1 = --skip-poh-verify ]]; then
maybeSkipLedgerVerify="$1"
shift 1
elif [[ $1 = --skip-setup ]]; then

View File

@@ -392,8 +392,8 @@ pub fn main() {
.help("drop older slots in the ledger"),
)
.arg(
clap::Arg::with_name("skip_ledger_verify")
.long("skip-ledger-verify")
clap::Arg::with_name("skip_poh_verify")
.long("skip-poh-verify")
.takes_value(false)
.help("Skip ledger verification at node bootup"),
)
@@ -539,7 +539,7 @@ pub fn main() {
}
let init_complete_file = matches.value_of("init_complete_file");
let verify_ledger = !matches.is_present("skip_ledger_verify");
let skip_poh_verify = matches.is_present("skip_poh_verify");
validator_config.blockstream_unix_socket = matches
.value_of("blockstream_unix_socket")
.map(PathBuf::from);
@@ -640,7 +640,7 @@ pub fn main() {
&Arc::new(voting_keypair),
&Arc::new(storage_keypair),
cluster_entrypoint.as_ref(),
verify_ledger,
!skip_poh_verify,
&validator_config,
);
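With the flag renamed end to end, the corresponding validator invocation is sketched below; the binary name solana-validator and the --ledger flag are assumptions not shown in this diff, and the identity/vote arguments a real node needs are omitted:

    solana-validator --ledger <ledger-dir> --skip-poh-verify ...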