2021-03-10 09:49:10 -08:00
|
|
|
use {
|
|
|
|
crate::{
|
2022-01-13 07:20:20 -08:00
|
|
|
accounts_db::{
|
2022-10-31 11:28:35 -07:00
|
|
|
AccountShrinkThreshold, AccountStorageMap, AccountsDbConfig, AtomicAppendVecId,
|
|
|
|
CalcAccountsHashDataSource, SnapshotStorage, SnapshotStorages,
|
2022-01-13 07:20:20 -08:00
|
|
|
},
|
2021-09-07 21:30:38 -07:00
|
|
|
accounts_index::AccountSecondaryIndexes,
|
2021-09-30 14:26:17 -07:00
|
|
|
accounts_update_notifier_interface::AccountsUpdateNotifier,
|
2022-07-06 15:30:30 -07:00
|
|
|
bank::{Bank, BankFieldsToDeserialize, BankSlotDelta},
|
2021-09-02 21:29:11 -07:00
|
|
|
builtins::Builtins,
|
2022-10-31 11:28:35 -07:00
|
|
|
hardened_unpack::{
|
|
|
|
streaming_unpack_snapshot, unpack_snapshot, ParallelSelector, UnpackError,
|
|
|
|
UnpackedAppendVecMap,
|
|
|
|
},
|
2022-08-05 12:49:00 -07:00
|
|
|
runtime_config::RuntimeConfig,
|
2022-07-06 15:30:30 -07:00
|
|
|
serde_snapshot::{
|
|
|
|
bank_from_streams, bank_to_stream, fields_from_streams, SerdeStyle, SnapshotStreams,
|
|
|
|
},
|
2021-07-08 11:44:47 -07:00
|
|
|
shared_buffer_reader::{SharedBuffer, SharedBufferReader},
|
2021-08-08 05:57:06 -07:00
|
|
|
snapshot_archive_info::{
|
|
|
|
FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfoGetter,
|
|
|
|
},
|
2022-10-31 11:28:35 -07:00
|
|
|
snapshot_hash::SnapshotHash,
|
2022-10-13 09:47:36 -07:00
|
|
|
snapshot_package::{AccountsPackage, AccountsPackageType, SnapshotPackage, SnapshotType},
|
2022-10-31 11:28:35 -07:00
|
|
|
snapshot_utils::snapshot_storage_rebuilder::{
|
|
|
|
RebuiltSnapshotStorage, SnapshotStorageRebuilder,
|
|
|
|
},
|
2022-08-18 06:48:58 -07:00
|
|
|
status_cache,
|
2020-06-17 08:27:03 -07:00
|
|
|
},
|
2021-03-10 09:49:10 -08:00
|
|
|
bincode::{config::Options, serialize_into},
|
|
|
|
bzip2::bufread::BzDecoder,
|
2022-10-31 11:28:35 -07:00
|
|
|
crossbeam_channel::Sender,
|
2021-03-10 09:49:10 -08:00
|
|
|
flate2::read::GzDecoder,
|
2021-07-22 12:40:37 -07:00
|
|
|
lazy_static::lazy_static,
|
2021-03-10 09:49:10 -08:00
|
|
|
log::*,
|
2021-08-31 16:33:27 -07:00
|
|
|
rayon::prelude::*,
|
2021-03-10 09:49:10 -08:00
|
|
|
regex::Regex,
|
2022-08-29 11:17:27 -07:00
|
|
|
solana_measure::{measure, measure::Measure},
|
2022-08-18 06:48:58 -07:00
|
|
|
solana_sdk::{
|
|
|
|
clock::Slot,
|
|
|
|
genesis_config::GenesisConfig,
|
|
|
|
hash::Hash,
|
|
|
|
pubkey::Pubkey,
|
|
|
|
slot_history::{Check, SlotHistory},
|
|
|
|
},
|
2021-03-10 09:49:10 -08:00
|
|
|
std::{
|
2022-04-27 09:40:03 -07:00
|
|
|
cmp::Ordering,
|
|
|
|
collections::{HashMap, HashSet},
|
2021-03-10 09:49:10 -08:00
|
|
|
fmt,
|
|
|
|
fs::{self, File},
|
2021-08-04 15:07:55 -07:00
|
|
|
io::{BufReader, BufWriter, Error as IoError, ErrorKind, Read, Seek, Write},
|
2021-03-10 09:49:10 -08:00
|
|
|
path::{Path, PathBuf},
|
2021-08-04 15:07:55 -07:00
|
|
|
process::ExitStatus,
|
2021-03-10 09:49:10 -08:00
|
|
|
str::FromStr,
|
2022-09-12 11:51:12 -07:00
|
|
|
sync::{
|
|
|
|
atomic::{AtomicBool, AtomicU32},
|
|
|
|
Arc,
|
|
|
|
},
|
2022-10-31 11:28:35 -07:00
|
|
|
thread::{Builder, JoinHandle},
|
2021-02-04 07:00:33 -08:00
|
|
|
},
|
2021-08-04 15:07:55 -07:00
|
|
|
tar::{self, Archive},
|
2021-07-22 12:40:37 -07:00
|
|
|
tempfile::TempDir,
|
2021-03-10 09:49:10 -08:00
|
|
|
thiserror::Error,
|
2020-06-17 08:27:03 -07:00
|
|
|
};
|
2019-07-31 17:58:10 -07:00
|
|
|
|
2022-03-09 14:09:34 -08:00
|
|
|
mod archive_format;
|
2022-08-29 11:17:27 -07:00
|
|
|
mod snapshot_storage_rebuilder;
|
2022-03-09 14:09:34 -08:00
|
|
|
pub use archive_format::*;
|
|
|
|
|
2022-03-07 09:34:35 -08:00
|
|
|
/// File name for the serialized bank status cache inside a snapshot
pub const SNAPSHOT_STATUS_CACHE_FILENAME: &str = "status_cache";
/// Subdirectory for snapshot archives downloaded from a remote node
pub const SNAPSHOT_ARCHIVE_DOWNLOAD_DIR: &str = "remote";
/// Default number of slots between full snapshot archives
pub const DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = 25_000;
/// Default number of slots between incremental snapshot archives
pub const DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = 100;
/// Upper bound enforced by the `*_capped` (de)serialization helpers below
const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB
const MAX_SNAPSHOT_VERSION_FILE_SIZE: u64 = 8; // byte
const VERSION_STRING_V1_2_0: &str = "1.2.0";
/// Prefix for in-progress (temporary) bank snapshot directories
pub(crate) const TMP_BANK_SNAPSHOT_PREFIX: &str = "tmp-bank-snapshot-";
/// Prefix for in-progress (temporary) snapshot archives and their staging dirs;
/// `remove_tmp_snapshot_archives()` cleans up leftovers with this prefix
pub const TMP_SNAPSHOT_ARCHIVE_PREFIX: &str = "tmp-snapshot-archive-";
/// Extension for bank snapshots serialized before the accounts hash was calculated
/// (see `BankSnapshotType::Pre`)
pub const BANK_SNAPSHOT_PRE_FILENAME_EXTENSION: &str = "pre";
pub const MAX_BANK_SNAPSHOTS_TO_RETAIN: usize = 8; // Save some bank snapshots but not too many
pub const DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN: usize = 2;
pub const DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN: usize = 4;
/// Matches full snapshot archive file names, e.g. `snapshot-<slot>-<hash>.tar.zst`
pub const FULL_SNAPSHOT_ARCHIVE_FILENAME_REGEX: &str = r"^snapshot-(?P<slot>[[:digit:]]+)-(?P<hash>[[:alnum:]]+)\.(?P<ext>tar|tar\.bz2|tar\.zst|tar\.gz|tar\.lz4)$";
/// Matches incremental snapshot archive file names,
/// e.g. `incremental-snapshot-<base>-<slot>-<hash>.tar.zst`
pub const INCREMENTAL_SNAPSHOT_ARCHIVE_FILENAME_REGEX: &str = r"^incremental-snapshot-(?P<base>[[:digit:]]+)-(?P<slot>[[:digit:]]+)-(?P<hash>[[:alnum:]]+)\.(?P<ext>tar|tar\.bz2|tar\.zst|tar\.gz|tar\.lz4)$";
|
2021-07-01 10:20:56 -07:00
|
|
|
|
2020-05-22 10:54:24 -07:00
|
|
|
/// The version of the snapshot serialization format
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum SnapshotVersion {
    /// Version "1.2.0" (see `VERSION_STRING_V1_2_0`); currently the only supported version
    V1_2_0,
}
|
|
|
|
|
2020-06-18 22:38:37 -07:00
|
|
|
impl Default for SnapshotVersion {
|
|
|
|
fn default() -> Self {
|
2022-01-13 10:19:15 -08:00
|
|
|
SnapshotVersion::V1_2_0
|
2020-06-18 22:38:37 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl fmt::Display for SnapshotVersion {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Writes the canonical version string produced by the
        // `From<SnapshotVersion> for &'static str` impl below
        f.write_str(From::from(*self))
    }
}
|
|
|
|
|
2020-05-22 10:54:24 -07:00
|
|
|
impl From<SnapshotVersion> for &'static str {
    /// Maps each snapshot version to its canonical version string (e.g. "1.2.0")
    fn from(snapshot_version: SnapshotVersion) -> &'static str {
        match snapshot_version {
            SnapshotVersion::V1_2_0 => VERSION_STRING_V1_2_0,
        }
    }
}
|
|
|
|
|
|
|
|
impl FromStr for SnapshotVersion {
|
|
|
|
type Err = &'static str;
|
|
|
|
|
|
|
|
fn from_str(version_string: &str) -> std::result::Result<Self, Self::Err> {
|
2020-06-18 22:38:37 -07:00
|
|
|
// Remove leading 'v' or 'V' from slice
|
|
|
|
let version_string = if version_string
|
|
|
|
.get(..1)
|
|
|
|
.map_or(false, |s| s.eq_ignore_ascii_case("v"))
|
|
|
|
{
|
|
|
|
&version_string[1..]
|
|
|
|
} else {
|
|
|
|
version_string
|
|
|
|
};
|
2020-05-22 10:54:24 -07:00
|
|
|
match version_string {
|
|
|
|
VERSION_STRING_V1_2_0 => Ok(SnapshotVersion::V1_2_0),
|
|
|
|
_ => Err("unsupported snapshot version"),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl SnapshotVersion {
    /// Returns the canonical version string (e.g. "1.2.0") for this version
    pub fn as_str(self) -> &'static str {
        <&str as From<Self>>::from(self)
    }
}
|
2020-02-26 20:10:31 -08:00
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
/// Information about a bank snapshot. Namely the slot of the bank, the path to the snapshot, and
/// the type of the snapshot.
#[derive(PartialEq, Eq, Debug)]
pub struct BankSnapshotInfo {
    /// Slot of the bank
    pub slot: Slot,
    /// Path to the snapshot
    pub snapshot_path: PathBuf,
    /// Type of the snapshot (pre/post accounts-hash calculation; see `BankSnapshotType`)
    pub snapshot_type: BankSnapshotType,
}
|
|
|
|
|
|
|
|
impl PartialOrd for BankSnapshotInfo {
    // Delegates to the total order defined by `Ord` (ascending by slot)
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
|
|
|
|
|
2022-05-03 07:57:15 -07:00
|
|
|
// Order BankSnapshotInfo by slot (ascending), which practically is sorting chronologically
// NOTE: only the slot participates in the ordering; two snapshots of the same slot
// (e.g. Pre and Post) compare as equal.
impl Ord for BankSnapshotInfo {
    fn cmp(&self, other: &Self) -> Ordering {
        self.slot.cmp(&other.slot)
    }
}
|
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
/// Bank snapshots traditionally had their accounts hash calculated prior to serialization. Since
/// the hash calculation takes a long time, an optimization has been put in to offload the accounts
/// hash calculation. The bank serialization format has not changed, so we need another way to
/// identify if a bank snapshot contains the calculated accounts hash or not.
///
/// When a bank snapshot is first taken, it does not have the calculated accounts hash. It is said
/// that this bank snapshot is "pre" accounts hash. Later, when the accounts hash is calculated,
/// the bank snapshot is re-serialized, and is now "post" accounts hash.
///
/// See also [`BankSnapshotInfo::snapshot_type`] and `BANK_SNAPSHOT_PRE_FILENAME_EXTENSION`.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum BankSnapshotType {
    /// This bank snapshot has *not* yet had its accounts hash calculated
    Pre,
    /// This bank snapshot *has* had its accounts hash calculated
    Post,
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Helper type when rebuilding from snapshots. Designed to handle when rebuilding from just a
/// full snapshot, or from both a full snapshot and an incremental snapshot.
#[derive(Debug)]
struct SnapshotRootPaths {
    /// Path to the full snapshot's root data file
    full_snapshot_root_file_path: PathBuf,
    /// Path to the incremental snapshot's root data file, if rebuilding from one
    incremental_snapshot_root_file_path: Option<PathBuf>,
}
|
|
|
|
|
|
|
|
/// Helper type to bundle up the results from `unarchive_snapshot()`
#[derive(Debug)]
struct UnarchivedSnapshot {
    // Never read, but kept alive — presumably so the TempDir (and its contents) is not
    // deleted until this struct is dropped; confirm against `unarchive_snapshot()`
    #[allow(dead_code)]
    unpack_dir: TempDir,
    /// Account storages recovered from the unpacked snapshot
    storage: AccountStorageMap,
    /// Where the snapshots were unpacked, plus the snapshot version they were written with
    unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion,
    /// Timing of the untar step, for reporting
    measure_untar: Measure,
}
|
|
|
|
|
|
|
|
/// Helper type for passing around the unpacked snapshots dir and the snapshot version together
#[derive(Debug)]
struct UnpackedSnapshotsDirAndVersion {
    /// Directory the snapshots were unpacked into
    unpacked_snapshots_dir: PathBuf,
    /// Serialization format version of the unpacked snapshots
    snapshot_version: SnapshotVersion,
}
|
|
|
|
|
2022-08-29 11:17:27 -07:00
|
|
|
/// Helper type for passing around account storage map and next append vec id
/// for reconstructing accounts from a snapshot
pub(crate) struct StorageAndNextAppendVecId {
    /// The rebuilt account storages
    pub storage: AccountStorageMap,
    /// The next append vec id to hand out after the ones already in `storage`
    pub next_append_vec_id: AtomicAppendVecId,
}
|
|
|
|
|
2019-12-02 14:42:05 -08:00
|
|
|
/// Errors that can occur while creating, archiving, unpacking, or loading snapshots
#[derive(Error, Debug)]
#[allow(clippy::large_enum_variant)]
pub enum SnapshotError {
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("serialization error: {0}")]
    Serialize(#[from] bincode::Error),

    #[error("archive generation failure {0}")]
    ArchiveGenerationFailure(ExitStatus),

    #[error("storage path symlink is invalid")]
    StoragePathSymlinkInvalid,

    #[error("Unpack error: {0}")]
    UnpackError(#[from] UnpackError),

    // Like `Io`, but tagged with a static string describing which operation failed
    #[error("source({1}) - I/O error: {0}")]
    IoWithSource(std::io::Error, &'static str),

    #[error("could not get file name from path: {}", .0.display())]
    PathToFileNameError(PathBuf),

    #[error("could not get str from file name: {}", .0.display())]
    FileNameToStrError(PathBuf),

    #[error("could not parse snapshot archive's file name: {0}")]
    ParseSnapshotArchiveFileNameError(String),

    #[error("snapshots are incompatible: full snapshot slot ({0}) and incremental snapshot base slot ({1}) do not match")]
    MismatchedBaseSlot(Slot, Slot),

    #[error("no snapshot archives to load from")]
    NoSnapshotArchives,

    // (deserialized bank's slot+hash, snapshot archive info's slot+hash)
    #[error("snapshot has mismatch: deserialized bank: {:?}, snapshot archive info: {:?}", .0, .1)]
    MismatchedSlotHash((Slot, SnapshotHash), (Slot, SnapshotHash)),

    #[error("snapshot slot deltas are invalid: {0}")]
    VerifySlotDeltas(#[from] VerifySlotDeltasError),
}
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Result type used throughout snapshot handling; the error type is [`SnapshotError`]
pub type Result<T> = std::result::Result<T, SnapshotError>;
|
2020-08-14 11:43:14 -07:00
|
|
|
|
2022-08-18 06:48:58 -07:00
|
|
|
/// Errors that can happen in `verify_slot_deltas()`
///
/// Wrapped into [`SnapshotError::VerifySlotDeltas`] when surfaced to callers.
#[derive(Error, Debug, PartialEq, Eq)]
pub enum VerifySlotDeltasError {
    #[error("too many entries: {0} (max: {1})")]
    TooManyEntries(usize, usize),

    #[error("slot {0} is not a root")]
    SlotIsNotRoot(Slot),

    #[error("slot {0} is greater than bank slot {1}")]
    SlotGreaterThanMaxRoot(Slot, Slot),

    #[error("slot {0} has multiple entries")]
    SlotHasMultipleEntries(Slot),

    #[error("slot {0} was not found in slot history")]
    SlotNotFoundInHistory(Slot),

    #[error("slot {0} was in history but missing from slot deltas")]
    SlotNotFoundInDeltas(Slot),

    #[error("slot history is bad and cannot be used to verify slot deltas")]
    BadSlotHistory,
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// If the validator halts in the middle of `archive_snapshot_package()`, the temporary staging
|
|
|
|
/// directory won't be cleaned up. Call this function to clean them up.
|
2021-08-21 13:41:03 -07:00
|
|
|
pub fn remove_tmp_snapshot_archives(snapshot_archives_dir: impl AsRef<Path>) {
|
2021-07-22 12:40:37 -07:00
|
|
|
if let Ok(entries) = fs::read_dir(snapshot_archives_dir) {
|
2020-11-30 17:37:38 -08:00
|
|
|
for entry in entries.filter_map(|entry| entry.ok()) {
|
2021-07-22 12:40:37 -07:00
|
|
|
let file_name = entry
|
2020-11-30 17:37:38 -08:00
|
|
|
.file_name()
|
|
|
|
.into_string()
|
2021-07-22 12:40:37 -07:00
|
|
|
.unwrap_or_else(|_| String::new());
|
2021-08-31 16:33:27 -07:00
|
|
|
if file_name.starts_with(TMP_SNAPSHOT_ARCHIVE_PREFIX) {
|
2021-01-11 11:38:55 -08:00
|
|
|
if entry.path().is_file() {
|
|
|
|
fs::remove_file(entry.path())
|
|
|
|
} else {
|
|
|
|
fs::remove_dir_all(entry.path())
|
|
|
|
}
|
|
|
|
.unwrap_or_else(|err| {
|
2020-11-30 17:37:38 -08:00
|
|
|
warn!("Failed to remove {}: {}", entry.path().display(), err)
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-17 11:01:59 -07:00
|
|
|
/// Make a snapshot archive out of the snapshot package
///
/// Steps:
/// 1. serialize the status cache into the snapshot links dir,
/// 2. stage the snapshots dir, the AppendVec files (as symlinks), and a `version` file
///    into a temporary directory next to the final archive location,
/// 3. tar (and compress, per the package's archive format) the staging directory,
/// 4. atomically `rename` the finished archive into place,
/// 5. purge archives beyond the retention limits.
///
/// The retention limits and archive dirs are forwarded to `purge_old_snapshot_archives()`.
pub fn archive_snapshot_package(
    snapshot_package: &SnapshotPackage,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    maximum_full_snapshot_archives_to_retain: usize,
    maximum_incremental_snapshot_archives_to_retain: usize,
) -> Result<()> {
    info!(
        "Generating snapshot archive for slot {}",
        snapshot_package.slot()
    );

    serialize_status_cache(
        snapshot_package.slot(),
        &snapshot_package.slot_deltas,
        &snapshot_package
            .snapshot_links
            .path()
            .join(SNAPSHOT_STATUS_CACHE_FILENAME),
    )?;

    let mut timer = Measure::start("snapshot_package-package_snapshots");
    // The archive is built in the same directory as its final path so the
    // concluding rename below stays on one filesystem (and thus atomic)
    let tar_dir = snapshot_package
        .path()
        .parent()
        .expect("Tar output path is invalid");

    fs::create_dir_all(tar_dir)
        .map_err(|e| SnapshotError::IoWithSource(e, "create archive path"))?;

    // Create the staging directories
    let staging_dir_prefix = TMP_SNAPSHOT_ARCHIVE_PREFIX;
    let staging_dir = tempfile::Builder::new()
        .prefix(&format!(
            "{}{}-",
            staging_dir_prefix,
            snapshot_package.slot()
        ))
        .tempdir_in(tar_dir)
        .map_err(|e| SnapshotError::IoWithSource(e, "create archive tempdir"))?;

    let staging_accounts_dir = staging_dir.path().join("accounts");
    let staging_snapshots_dir = staging_dir.path().join("snapshots");
    let staging_version_file = staging_dir.path().join("version");
    fs::create_dir_all(&staging_accounts_dir)
        .map_err(|e| SnapshotError::IoWithSource(e, "create staging path"))?;

    // Add the snapshots to the staging directory
    symlink::symlink_dir(
        snapshot_package.snapshot_links.path(),
        &staging_snapshots_dir,
    )
    .map_err(|e| SnapshotError::IoWithSource(e, "create staging symlinks"))?;

    // Add the AppendVecs into the compressible list
    for storage in snapshot_package.snapshot_storages.iter().flatten() {
        // Flush so the file on disk reflects the in-memory contents before archiving
        storage.flush()?;
        let storage_path = storage.get_path();
        let output_path = staging_accounts_dir.join(crate::append_vec::AppendVec::file_name(
            storage.slot(),
            storage.append_vec_id(),
        ));

        // `storage_path` - The file path where the AppendVec itself is located
        // `output_path` - The file path where the AppendVec will be placed in the staging directory.
        let storage_path =
            fs::canonicalize(storage_path).expect("Could not get absolute path for accounts");
        symlink::symlink_file(storage_path, &output_path)
            .map_err(|e| SnapshotError::IoWithSource(e, "create storage symlink"))?;
        if !output_path.is_file() {
            return Err(SnapshotError::StoragePathSymlinkInvalid);
        }
    }

    // Write version file
    {
        let mut f = fs::File::create(staging_version_file)
            .map_err(|e| SnapshotError::IoWithSource(e, "create version file"))?;
        f.write_all(snapshot_package.snapshot_version.as_str().as_bytes())
            .map_err(|e| SnapshotError::IoWithSource(e, "write version file"))?;
    }

    // Tar the staging directory into the archive at `archive_path`
    let archive_path = tar_dir.join(format!(
        "{}{}.{}",
        staging_dir_prefix,
        snapshot_package.slot(),
        snapshot_package.archive_format().extension(),
    ));

    {
        let mut archive_file = fs::File::create(&archive_path)?;

        let do_archive_files = |encoder: &mut dyn Write| -> Result<()> {
            let mut archive = tar::Builder::new(encoder);
            // Serialize the version and snapshots files before accounts so we can quickly determine the version
            // and other bank fields. This is necessary if we want to interleave unpacking with reconstruction
            archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?;
            for dir in ["snapshots", "accounts"] {
                archive.append_dir_all(dir, staging_dir.as_ref().join(dir))?;
            }
            archive.into_inner()?;
            Ok(())
        };

        // Pick the compressor matching the package's archive format; `Tar` writes uncompressed
        match snapshot_package.archive_format() {
            ArchiveFormat::TarBzip2 => {
                let mut encoder =
                    bzip2::write::BzEncoder::new(archive_file, bzip2::Compression::best());
                do_archive_files(&mut encoder)?;
                encoder.finish()?;
            }
            ArchiveFormat::TarGzip => {
                let mut encoder =
                    flate2::write::GzEncoder::new(archive_file, flate2::Compression::default());
                do_archive_files(&mut encoder)?;
                encoder.finish()?;
            }
            ArchiveFormat::TarZstd => {
                let mut encoder = zstd::stream::Encoder::new(archive_file, 0)?;
                do_archive_files(&mut encoder)?;
                encoder.finish()?;
            }
            ArchiveFormat::TarLz4 => {
                let mut encoder = lz4::EncoderBuilder::new().level(1).build(archive_file)?;
                do_archive_files(&mut encoder)?;
                let (_output, result) = encoder.finish();
                result?
            }
            ArchiveFormat::Tar => {
                do_archive_files(&mut archive_file)?;
            }
        };
    }

    // Atomically move the archive into position for other validators to find
    let metadata = fs::metadata(&archive_path)
        .map_err(|e| SnapshotError::IoWithSource(e, "archive path stat"))?;
    fs::rename(&archive_path, snapshot_package.path())
        .map_err(|e| SnapshotError::IoWithSource(e, "archive path rename"))?;

    purge_old_snapshot_archives(
        full_snapshot_archives_dir,
        incremental_snapshot_archives_dir,
        maximum_full_snapshot_archives_to_retain,
        maximum_incremental_snapshot_archives_to_retain,
    );

    timer.stop();
    info!(
        "Successfully created {:?}. slot: {}, elapsed ms: {}, size={}",
        snapshot_package.path(),
        snapshot_package.slot(),
        timer.as_ms(),
        metadata.len()
    );

    datapoint_info!(
        "archive-snapshot-package",
        ("slot", snapshot_package.slot(), i64),
        (
            "archive_format",
            snapshot_package.archive_format().to_string(),
            String
        ),
        ("duration_ms", timer.as_ms(), i64),
        (
            if snapshot_package.snapshot_type.is_full_snapshot() {
                "full-snapshot-archive-size"
            } else {
                "incremental-snapshot-archive-size"
            },
            metadata.len(),
            i64
        ),
    );
    Ok(())
}
|
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
/// Get the bank snapshots in a directory
///
/// Scans `bank_snapshots_dir` for subdirectories whose name parses as a `Slot`, and for each
/// such slot collects any "pre" and/or "post" snapshot files found inside. Returns an entry per
/// snapshot file found (so a slot can contribute up to two entries), in directory-scan order.
/// If the directory cannot be read, logs and returns an empty vector.
pub fn get_bank_snapshots(bank_snapshots_dir: impl AsRef<Path>) -> Vec<BankSnapshotInfo> {
    let mut bank_snapshots = Vec::default();
    match fs::read_dir(&bank_snapshots_dir) {
        Err(err) => {
            info!(
                "Unable to read bank snapshots directory {}: {}",
                bank_snapshots_dir.as_ref().display(),
                err
            );
        }
        Ok(paths) => paths
            .filter_map(|entry| {
                // check if this entry is a directory and only a Slot
                // bank snapshots are bank_snapshots_dir/slot/slot(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION)
                entry
                    .ok()
                    .filter(|entry| entry.path().is_dir())
                    .and_then(|entry| {
                        entry
                            .path()
                            .file_name()
                            .and_then(|file_name| file_name.to_str())
                            .and_then(|file_name| file_name.parse::<Slot>().ok())
                    })
            })
            .for_each(|slot| {
                // check this directory to see if there is a BankSnapshotPre and/or
                // BankSnapshotPost file
                let bank_snapshot_outer_dir = get_bank_snapshots_dir(&bank_snapshots_dir, slot);
                let bank_snapshot_post_path =
                    bank_snapshot_outer_dir.join(get_snapshot_file_name(slot));
                // The "pre" file is the "post" file name plus the `.pre` extension
                let mut bank_snapshot_pre_path = bank_snapshot_post_path.clone();
                bank_snapshot_pre_path.set_extension(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION);

                if bank_snapshot_pre_path.is_file() {
                    bank_snapshots.push(BankSnapshotInfo {
                        slot,
                        snapshot_path: bank_snapshot_pre_path,
                        snapshot_type: BankSnapshotType::Pre,
                    });
                }

                if bank_snapshot_post_path.is_file() {
                    bank_snapshots.push(BankSnapshotInfo {
                        slot,
                        snapshot_path: bank_snapshot_post_path,
                        snapshot_type: BankSnapshotType::Post,
                    });
                }
            }),
    }
    bank_snapshots
}
|
|
|
|
|
|
|
|
/// Get the bank snapshots in a directory
|
|
|
|
///
|
|
|
|
/// This function retains only the bank snapshots of type BankSnapshotType::Pre
|
|
|
|
pub fn get_bank_snapshots_pre(bank_snapshots_dir: impl AsRef<Path>) -> Vec<BankSnapshotInfo> {
|
|
|
|
let mut bank_snapshots = get_bank_snapshots(bank_snapshots_dir);
|
|
|
|
bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_type == BankSnapshotType::Pre);
|
|
|
|
bank_snapshots
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Get the bank snapshots in a directory
|
|
|
|
///
|
|
|
|
/// This function retains only the bank snapshots of type BankSnapshotType::Post
|
|
|
|
pub fn get_bank_snapshots_post(bank_snapshots_dir: impl AsRef<Path>) -> Vec<BankSnapshotInfo> {
|
|
|
|
let mut bank_snapshots = get_bank_snapshots(bank_snapshots_dir);
|
|
|
|
bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_type == BankSnapshotType::Post);
|
|
|
|
bank_snapshots
|
2019-07-31 17:58:10 -07:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Get the bank snapshot with the highest slot in a directory
///
/// This function gets the highest bank snapshot of type BankSnapshotType::Pre
pub fn get_highest_bank_snapshot_pre(
    bank_snapshots_dir: impl AsRef<Path>,
) -> Option<BankSnapshotInfo> {
    // Returns None when the directory has no "pre" snapshots (or cannot be read)
    do_get_highest_bank_snapshot(get_bank_snapshots_pre(bank_snapshots_dir))
}
|
|
|
|
|
|
|
|
/// Get the bank snapshot with the highest slot in a directory
///
/// This function gets the highest bank snapshot of type BankSnapshotType::Post
pub fn get_highest_bank_snapshot_post(
    bank_snapshots_dir: impl AsRef<Path>,
) -> Option<BankSnapshotInfo> {
    // Returns None when the directory has no "post" snapshots (or cannot be read)
    do_get_highest_bank_snapshot(get_bank_snapshots_post(bank_snapshots_dir))
}
|
|
|
|
|
|
|
|
fn do_get_highest_bank_snapshot(
|
|
|
|
mut bank_snapshots: Vec<BankSnapshotInfo>,
|
|
|
|
) -> Option<BankSnapshotInfo> {
|
|
|
|
bank_snapshots.sort_unstable();
|
|
|
|
bank_snapshots.into_iter().rev().next()
|
2021-07-22 12:40:37 -07:00
|
|
|
}
|
|
|
|
|
2020-05-22 10:54:24 -07:00
|
|
|
/// Serializes a snapshot data file at `data_file_path` via `serializer`, capped at the default
/// maximum size (`MAX_SNAPSHOT_DATA_FILE_SIZE`). Returns the number of bytes written.
pub fn serialize_snapshot_data_file<F>(data_file_path: &Path, serializer: F) -> Result<u64>
where
    F: FnOnce(&mut BufWriter<File>) -> Result<()>,
{
    serialize_snapshot_data_file_capped::<F>(
        data_file_path,
        MAX_SNAPSHOT_DATA_FILE_SIZE,
        serializer,
    )
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Deserializes a single snapshot data file at `data_file_path` via `deserializer`, capped at
/// the default maximum size (`MAX_SNAPSHOT_DATA_FILE_SIZE`).
///
/// Adapts the single-file `deserializer` to the multi-stream machinery by wrapping the path as a
/// full-snapshot-only `SnapshotRootPaths` (no incremental snapshot).
pub fn deserialize_snapshot_data_file<T: Sized>(
    data_file_path: &Path,
    deserializer: impl FnOnce(&mut BufReader<File>) -> Result<T>,
) -> Result<T> {
    // Only the full snapshot stream exists here, so hand just that stream to the deserializer
    let wrapped_deserializer = move |streams: &mut SnapshotStreams<File>| -> Result<T> {
        deserializer(streams.full_snapshot_stream)
    };

    let wrapped_data_file_path = SnapshotRootPaths {
        full_snapshot_root_file_path: data_file_path.to_path_buf(),
        incremental_snapshot_root_file_path: None,
    };

    deserialize_snapshot_data_files_capped(
        &wrapped_data_file_path,
        MAX_SNAPSHOT_DATA_FILE_SIZE,
        wrapped_deserializer,
    )
}
|
|
|
|
|
|
|
|
/// Deserializes the snapshot data file(s) named by `snapshot_root_paths` via `deserializer`,
/// capped at the default maximum size (`MAX_SNAPSHOT_DATA_FILE_SIZE`).
fn deserialize_snapshot_data_files<T: Sized>(
    snapshot_root_paths: &SnapshotRootPaths,
    deserializer: impl FnOnce(&mut SnapshotStreams<File>) -> Result<T>,
) -> Result<T> {
    deserialize_snapshot_data_files_capped(
        snapshot_root_paths,
        MAX_SNAPSHOT_DATA_FILE_SIZE,
        deserializer,
    )
}
|
|
|
|
|
|
|
|
fn serialize_snapshot_data_file_capped<F>(
|
2020-01-09 16:49:36 -08:00
|
|
|
data_file_path: &Path,
|
|
|
|
maximum_file_size: u64,
|
2020-05-22 10:54:24 -07:00
|
|
|
serializer: F,
|
2020-01-09 16:49:36 -08:00
|
|
|
) -> Result<u64>
|
|
|
|
where
|
2020-05-22 10:54:24 -07:00
|
|
|
F: FnOnce(&mut BufWriter<File>) -> Result<()>,
|
2020-01-09 16:49:36 -08:00
|
|
|
{
|
|
|
|
let data_file = File::create(data_file_path)?;
|
|
|
|
let mut data_file_stream = BufWriter::new(data_file);
|
|
|
|
serializer(&mut data_file_stream)?;
|
|
|
|
data_file_stream.flush()?;
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
let consumed_size = data_file_stream.stream_position()?;
|
2020-01-09 16:49:36 -08:00
|
|
|
if consumed_size > maximum_file_size {
|
|
|
|
let error_message = format!(
|
|
|
|
"too large snapshot data file to serialize: {:?} has {} bytes",
|
|
|
|
data_file_path, consumed_size
|
|
|
|
);
|
|
|
|
return Err(get_io_error(&error_message));
|
|
|
|
}
|
|
|
|
Ok(consumed_size)
|
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Deserializes the snapshot data file(s) named by `snapshot_root_paths` via `deserializer`,
/// erroring if any file exceeds `maximum_file_size`.
///
/// Opens a buffered stream over the full snapshot file (and over the incremental snapshot file,
/// if present), runs `deserializer` over both streams, and then verifies the deserializer
/// consumed each file completely (via `check_deserialize_file_consumed`).
fn deserialize_snapshot_data_files_capped<T: Sized>(
    snapshot_root_paths: &SnapshotRootPaths,
    maximum_file_size: u64,
    deserializer: impl FnOnce(&mut SnapshotStreams<File>) -> Result<T>,
) -> Result<T> {
    let (full_snapshot_file_size, mut full_snapshot_data_file_stream) =
        create_snapshot_data_file_stream(
            &snapshot_root_paths.full_snapshot_root_file_path,
            maximum_file_size,
        )?;

    // The incremental stream/size are Some iff an incremental snapshot path was provided
    let (incremental_snapshot_file_size, mut incremental_snapshot_data_file_stream) =
        if let Some(ref incremental_snapshot_root_file_path) =
            snapshot_root_paths.incremental_snapshot_root_file_path
        {
            let (incremental_snapshot_file_size, incremental_snapshot_data_file_stream) =
                create_snapshot_data_file_stream(
                    incremental_snapshot_root_file_path,
                    maximum_file_size,
                )?;
            (
                Some(incremental_snapshot_file_size),
                Some(incremental_snapshot_data_file_stream),
            )
        } else {
            (None, None)
        };

    let mut snapshot_streams = SnapshotStreams {
        full_snapshot_stream: &mut full_snapshot_data_file_stream,
        incremental_snapshot_stream: incremental_snapshot_data_file_stream.as_mut(),
    };
    let ret = deserializer(&mut snapshot_streams)?;

    check_deserialize_file_consumed(
        full_snapshot_file_size,
        &snapshot_root_paths.full_snapshot_root_file_path,
        &mut full_snapshot_data_file_stream,
    )?;

    if let Some(ref incremental_snapshot_root_file_path) =
        snapshot_root_paths.incremental_snapshot_root_file_path
    {
        // unwrap() is safe: both Options were set together with the path above
        check_deserialize_file_consumed(
            incremental_snapshot_file_size.unwrap(),
            incremental_snapshot_root_file_path,
            incremental_snapshot_data_file_stream.as_mut().unwrap(),
        )?;
    }

    Ok(ret)
}
|
|
|
|
|
|
|
|
/// Before running the deserializer function, perform common operations on the snapshot archive
|
|
|
|
/// files, such as checking the file size and opening the file into a stream.
|
|
|
|
fn create_snapshot_data_file_stream<P>(
|
|
|
|
snapshot_root_file_path: P,
|
|
|
|
maximum_file_size: u64,
|
|
|
|
) -> Result<(u64, BufReader<File>)>
|
2020-01-09 16:49:36 -08:00
|
|
|
where
|
2021-07-22 12:40:37 -07:00
|
|
|
P: AsRef<Path>,
|
2020-01-09 16:49:36 -08:00
|
|
|
{
|
2021-07-22 12:40:37 -07:00
|
|
|
let snapshot_file_size = fs::metadata(&snapshot_root_file_path)?.len();
|
2020-01-09 16:49:36 -08:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
if snapshot_file_size > maximum_file_size {
|
|
|
|
let error_message =
|
|
|
|
format!(
|
|
|
|
"too large snapshot data file to deserialize: {} has {} bytes (max size is {} bytes)",
|
|
|
|
snapshot_root_file_path.as_ref().display(), snapshot_file_size, maximum_file_size
|
2020-01-09 16:49:36 -08:00
|
|
|
);
|
|
|
|
return Err(get_io_error(&error_message));
|
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
let snapshot_data_file = File::open(&snapshot_root_file_path)?;
|
|
|
|
let snapshot_data_file_stream = BufReader::new(snapshot_data_file);
|
2020-01-09 16:49:36 -08:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
Ok((snapshot_file_size, snapshot_data_file_stream))
|
|
|
|
}
|
2020-01-09 16:49:36 -08:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// After running the deserializer function, perform common checks to ensure the snapshot archive
|
|
|
|
/// files were consumed correctly.
|
|
|
|
fn check_deserialize_file_consumed<P>(
|
|
|
|
file_size: u64,
|
|
|
|
file_path: P,
|
|
|
|
file_stream: &mut BufReader<File>,
|
|
|
|
) -> Result<()>
|
|
|
|
where
|
|
|
|
P: AsRef<Path>,
|
|
|
|
{
|
|
|
|
let consumed_size = file_stream.stream_position()?;
|
2020-01-09 16:49:36 -08:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
if consumed_size != file_size {
|
|
|
|
let error_message =
|
|
|
|
format!(
|
|
|
|
"invalid snapshot data file: {} has {} bytes, however consumed {} bytes to deserialize",
|
|
|
|
file_path.as_ref().display(), file_size, consumed_size
|
2020-01-09 16:49:36 -08:00
|
|
|
);
|
|
|
|
return Err(get_io_error(&error_message));
|
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
Ok(())
|
2020-01-09 16:49:36 -08:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Serialize a bank to a snapshot
///
/// Writes the bank's serialized state into
/// `bank_snapshots_dir/slot/slot.BANK_SNAPSHOT_PRE_FILENAME_EXTENSION` and returns a
/// `BankSnapshotInfo` describing the new (pre-accounts-hash) bank snapshot.
///
/// **DEVELOPER NOTE** Any error that is returned from this function may bring down the node! This
/// function is called from AccountsBackgroundService to handle snapshot requests. Since taking a
/// snapshot is not permitted to fail, any errors returned here will trigger the node to shutdown.
/// So, be careful whenever adding new code that may return errors.
pub fn add_bank_snapshot(
    bank_snapshots_dir: impl AsRef<Path>,
    bank: &Bank,
    snapshot_storages: &[SnapshotStorage],
    snapshot_version: SnapshotVersion,
) -> Result<BankSnapshotInfo> {
    let mut add_snapshot_time = Measure::start("add-snapshot-ms");
    let slot = bank.slot();
    // bank_snapshots_dir/slot
    let bank_snapshots_dir = get_bank_snapshots_dir(bank_snapshots_dir, slot);
    fs::create_dir_all(&bank_snapshots_dir)?;

    // the bank snapshot is stored as bank_snapshots_dir/slot/slot.BANK_SNAPSHOT_PRE_FILENAME_EXTENSION
    let mut bank_snapshot_path = bank_snapshots_dir.join(get_snapshot_file_name(slot));
    bank_snapshot_path.set_extension(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION);

    info!(
        "Creating bank snapshot for slot {}, path: {}",
        slot,
        bank_snapshot_path.display(),
    );

    let mut bank_serialize = Measure::start("bank-serialize-ms");
    // The closure moves `snapshot_version` in; it runs inside
    // `serialize_snapshot_data_file` against a buffered writer over the snapshot file.
    let bank_snapshot_serializer = move |stream: &mut BufWriter<File>| -> Result<()> {
        // Map the snapshot version to a serde style. Only one version exists today, so
        // this match is exhaustive; adding a version forces this to be revisited.
        let serde_style = match snapshot_version {
            SnapshotVersion::V1_2_0 => SerdeStyle::Newer,
        };
        bank_to_stream(serde_style, stream.by_ref(), bank, snapshot_storages)?;
        Ok(())
    };
    let consumed_size =
        serialize_snapshot_data_file(&bank_snapshot_path, bank_snapshot_serializer)?;
    bank_serialize.stop();
    add_snapshot_time.stop();

    // Monitor sizes because they're capped to MAX_SNAPSHOT_DATA_FILE_SIZE
    datapoint_info!(
        "snapshot-bank-file",
        ("slot", slot, i64),
        ("size", consumed_size, i64)
    );

    inc_new_counter_info!("bank-serialize-ms", bank_serialize.as_ms() as usize);
    inc_new_counter_info!("add-snapshot-ms", add_snapshot_time.as_ms() as usize);

    info!(
        "{} for slot {} at {}",
        bank_serialize,
        slot,
        bank_snapshot_path.display(),
    );

    Ok(BankSnapshotInfo {
        slot,
        snapshot_path: bank_snapshot_path,
        // `Pre`: the accounts hash has not yet been folded in; a later step promotes
        // the snapshot to its final (post) form.
        snapshot_type: BankSnapshotType::Pre,
    })
}
|
|
|
|
|
2021-01-02 09:09:50 -08:00
|
|
|
fn serialize_status_cache(
|
2020-01-09 16:49:36 -08:00
|
|
|
slot: Slot,
|
2020-02-10 03:11:37 -08:00
|
|
|
slot_deltas: &[BankSlotDelta],
|
2021-01-04 12:48:34 -08:00
|
|
|
status_cache_path: &Path,
|
2020-01-09 16:49:36 -08:00
|
|
|
) -> Result<()> {
|
|
|
|
let mut status_cache_serialize = Measure::start("status_cache_serialize-ms");
|
2021-01-04 12:48:34 -08:00
|
|
|
let consumed_size = serialize_snapshot_data_file(status_cache_path, |stream| {
|
2020-05-22 10:54:24 -07:00
|
|
|
serialize_into(stream, slot_deltas)?;
|
|
|
|
Ok(())
|
|
|
|
})?;
|
2020-01-09 16:49:36 -08:00
|
|
|
status_cache_serialize.stop();
|
|
|
|
|
|
|
|
// Monitor sizes because they're capped to MAX_SNAPSHOT_DATA_FILE_SIZE
|
|
|
|
datapoint_info!(
|
|
|
|
"snapshot-status-cache-file",
|
|
|
|
("slot", slot, i64),
|
|
|
|
("size", consumed_size, i64)
|
|
|
|
);
|
|
|
|
|
|
|
|
inc_new_counter_info!(
|
|
|
|
"serialize-status-cache-ms",
|
|
|
|
status_cache_serialize.as_ms() as usize
|
|
|
|
);
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-07-01 10:20:56 -07:00
|
|
|
/// Remove the snapshot directory for this slot
|
2021-08-21 13:41:03 -07:00
|
|
|
pub fn remove_bank_snapshot<P>(slot: Slot, bank_snapshots_dir: P) -> Result<()>
|
2021-07-22 12:40:37 -07:00
|
|
|
where
|
|
|
|
P: AsRef<Path>,
|
|
|
|
{
|
2021-08-21 13:41:03 -07:00
|
|
|
let bank_snapshot_dir = get_bank_snapshots_dir(&bank_snapshots_dir, slot);
|
2021-07-22 12:40:37 -07:00
|
|
|
fs::remove_dir_all(bank_snapshot_dir)?;
|
2019-07-31 17:58:10 -07:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-06-14 15:46:49 -07:00
|
|
|
/// Timings for the phases of loading a bank from snapshot archives.
/// All durations are in microseconds.
#[derive(Debug, Default)]
pub struct BankFromArchiveTimings {
    /// Time spent rebuilding the bank from the unpacked snapshot fields
    pub rebuild_bank_from_snapshots_us: u64,
    /// Time spent unarchiving the full snapshot archive
    pub full_snapshot_untar_us: u64,
    /// Time spent unarchiving the incremental snapshot archive (zero when there was none)
    pub incremental_snapshot_untar_us: u64,
    /// Time spent verifying the rebuilt bank
    pub verify_snapshot_bank_us: u64,
}
|
|
|
|
|
2022-05-17 07:32:18 -07:00
|
|
|
// Upper bound on the number of parallel tar readers used when unarchiving a snapshot.
// From testing, 4 seems to be a sweet spot for ranges of 60M-360M accounts and 16-64 cores. This may need to be tuned later.
const PARALLEL_UNTAR_READERS_DEFAULT: usize = 4;
|
2021-06-29 16:26:15 -07:00
|
|
|
|
2022-07-06 15:30:30 -07:00
|
|
|
/// Verify that the (optional) incremental snapshot is compatible with the full snapshot, then
/// unarchive both into `bank_snapshots_dir`. Returns the unarchived full snapshot, the
/// unarchived incremental snapshot (when one was given), and the next append vec id to use
/// when rebuilding the accounts db.
fn verify_and_unarchive_snapshots(
    bank_snapshots_dir: impl AsRef<Path>,
    full_snapshot_archive_info: &FullSnapshotArchiveInfo,
    incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>,
    account_paths: &[PathBuf],
) -> Result<(UnarchivedSnapshot, Option<UnarchivedSnapshot>, AtomicU32)> {
    // An incremental snapshot is only valid on top of the full snapshot it was based on.
    check_are_snapshots_compatible(
        full_snapshot_archive_info,
        incremental_snapshot_archive_info,
    )?;

    // Cap the untar parallelism at PARALLEL_UNTAR_READERS_DEFAULT, scaling down on
    // machines with few cores (but never below 1).
    let parallel_divisions = std::cmp::min(
        PARALLEL_UNTAR_READERS_DEFAULT,
        std::cmp::max(1, num_cpus::get() / 4),
    );

    // Shared counter so both unarchive passes hand out non-overlapping append vec ids.
    let next_append_vec_id = Arc::new(AtomicU32::new(0));
    let unarchived_full_snapshot = unarchive_snapshot(
        &bank_snapshots_dir,
        TMP_SNAPSHOT_ARCHIVE_PREFIX,
        full_snapshot_archive_info.path(),
        "snapshot untar",
        account_paths,
        full_snapshot_archive_info.archive_format(),
        parallel_divisions,
        next_append_vec_id.clone(),
    )?;

    let unarchived_incremental_snapshot =
        if let Some(incremental_snapshot_archive_info) = incremental_snapshot_archive_info {
            let unarchived_incremental_snapshot = unarchive_snapshot(
                &bank_snapshots_dir,
                TMP_SNAPSHOT_ARCHIVE_PREFIX,
                incremental_snapshot_archive_info.path(),
                "incremental snapshot untar",
                account_paths,
                incremental_snapshot_archive_info.archive_format(),
                parallel_divisions,
                next_append_vec_id.clone(),
            )?;
            Some(unarchived_incremental_snapshot)
        } else {
            None
        };

    Ok((
        unarchived_full_snapshot,
        unarchived_incremental_snapshot,
        // The clones were consumed by the `unarchive_snapshot()` calls above, so this
        // should be the last reference; `try_unwrap` panics otherwise — NOTE(review):
        // relies on `unarchive_snapshot` not leaking its Arc clone.
        Arc::try_unwrap(next_append_vec_id).unwrap(),
    ))
}
|
|
|
|
|
|
|
|
/// Utility for parsing out bank specific information from a snapshot archive. This utility can be used
/// to parse out bank specific information like the leader schedule, epoch schedule, etc.
///
/// Locates the highest full snapshot archive in `full_snapshot_archives_dir` (erroring when
/// none exists) plus its highest matching incremental archive, unarchives them, and
/// deserializes just the bank fields.
pub fn bank_fields_from_snapshot_archives(
    bank_snapshots_dir: impl AsRef<Path>,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
) -> Result<BankFieldsToDeserialize> {
    let full_snapshot_archive_info =
        get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir)
            .ok_or(SnapshotError::NoSnapshotArchives)?;

    let incremental_snapshot_archive_info = get_highest_incremental_snapshot_archive_info(
        &incremental_snapshot_archives_dir,
        full_snapshot_archive_info.slot(),
    );

    // Only bank fields are wanted here, so the accounts are unpacked into a throwaway
    // temp dir (dropped — and deleted — when this function returns).
    let temp_dir = tempfile::Builder::new()
        .prefix("dummy-accounts-path")
        .tempdir()?;

    let account_paths = vec![temp_dir.path().to_path_buf()];

    let (unarchived_full_snapshot, unarchived_incremental_snapshot, _next_append_vec_id) =
        verify_and_unarchive_snapshots(
            &bank_snapshots_dir,
            &full_snapshot_archive_info,
            incremental_snapshot_archive_info.as_ref(),
            &account_paths,
        )?;

    bank_fields_from_snapshots(
        &unarchived_full_snapshot.unpacked_snapshots_dir_and_version,
        unarchived_incremental_snapshot
            .as_ref()
            .map(|unarchive_preparation_result| {
                &unarchive_preparation_result.unpacked_snapshots_dir_and_version
            }),
    )
}
|
|
|
|
|
|
|
|
/// Rebuild bank from snapshot archives. Handles either just a full snapshot, or both a full
/// snapshot and an incremental snapshot.
///
/// Returns the rebuilt bank together with timings for each phase. Panics if the rebuilt
/// bank fails verification (unless `limit_load_slot_count_from_snapshot` is set, which is
/// a debug/testing mode).
#[allow(clippy::too_many_arguments)]
pub fn bank_from_snapshot_archives(
    account_paths: &[PathBuf],
    bank_snapshots_dir: impl AsRef<Path>,
    full_snapshot_archive_info: &FullSnapshotArchiveInfo,
    incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>,
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&Builtins>,
    account_secondary_indexes: AccountSecondaryIndexes,
    accounts_db_caching_enabled: bool,
    limit_load_slot_count_from_snapshot: Option<usize>,
    shrink_ratio: AccountShrinkThreshold,
    test_hash_calculation: bool,
    accounts_db_skip_shrink: bool,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: &Arc<AtomicBool>,
) -> Result<(Bank, BankFromArchiveTimings)> {
    let (unarchived_full_snapshot, mut unarchived_incremental_snapshot, next_append_vec_id) =
        verify_and_unarchive_snapshots(
            bank_snapshots_dir,
            full_snapshot_archive_info,
            incremental_snapshot_archive_info,
            account_paths,
        )?;

    // Merge the incremental snapshot's rebuilt storages into the full snapshot's, so the
    // bank is rebuilt from a single combined storage map.
    let mut storage = unarchived_full_snapshot.storage;
    if let Some(ref mut unarchive_preparation_result) = unarchived_incremental_snapshot {
        let incremental_snapshot_storages =
            std::mem::take(&mut unarchive_preparation_result.storage);
        storage.extend(incremental_snapshot_storages.into_iter());
    }

    let storage_and_next_append_vec_id = StorageAndNextAppendVecId {
        storage,
        next_append_vec_id,
    };

    let mut measure_rebuild = Measure::start("rebuild bank from snapshots");
    let bank = rebuild_bank_from_snapshots(
        &unarchived_full_snapshot.unpacked_snapshots_dir_and_version,
        unarchived_incremental_snapshot
            .as_ref()
            .map(|unarchive_preparation_result| {
                &unarchive_preparation_result.unpacked_snapshots_dir_and_version
            }),
        account_paths,
        storage_and_next_append_vec_id,
        genesis_config,
        runtime_config,
        debug_keys,
        additional_builtins,
        account_secondary_indexes,
        accounts_db_caching_enabled,
        limit_load_slot_count_from_snapshot,
        shrink_ratio,
        verify_index,
        accounts_db_config,
        accounts_update_notifier,
        exit,
    )?;
    measure_rebuild.stop();
    info!("{}", measure_rebuild);

    // The expected slot/hash come from the incremental archive when there is one,
    // otherwise from the full archive.
    verify_bank_against_expected_slot_hash(
        &bank,
        incremental_snapshot_archive_info.as_ref().map_or(
            full_snapshot_archive_info.slot(),
            |incremental_snapshot_archive_info| incremental_snapshot_archive_info.slot(),
        ),
        incremental_snapshot_archive_info.as_ref().map_or(
            *full_snapshot_archive_info.hash(),
            |incremental_snapshot_archive_info| *incremental_snapshot_archive_info.hash(),
        ),
    )?;

    let mut measure_verify = Measure::start("verify");
    // Shrink is skipped when explicitly requested, or for local (non-remote) archives.
    if !bank.verify_snapshot_bank(
        test_hash_calculation,
        accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(),
        full_snapshot_archive_info.slot(),
    ) && limit_load_slot_count_from_snapshot.is_none()
    {
        panic!("Snapshot bank for slot {} failed to verify", bank.slot());
    }
    measure_verify.stop();

    let timings = BankFromArchiveTimings {
        rebuild_bank_from_snapshots_us: measure_rebuild.as_us(),
        full_snapshot_untar_us: unarchived_full_snapshot.measure_untar.as_us(),
        incremental_snapshot_untar_us: unarchived_incremental_snapshot
            .map_or(0, |unarchive_preparation_result| {
                unarchive_preparation_result.measure_untar.as_us()
            }),
        verify_snapshot_bank_us: measure_verify.as_us(),
    };
    Ok((bank, timings))
}
|
|
|
|
|
2022-05-10 13:37:41 -07:00
|
|
|
/// Rebuild bank from snapshot archives. This function searches `full_snapshot_archives_dir` and `incremental_snapshot_archives_dir` for the
/// highest full snapshot and highest corresponding incremental snapshot, then rebuilds the bank.
///
/// Returns the bank plus the archive infos it was loaded from, and reports the phase
/// timings via a datapoint. Errors with `SnapshotError::NoSnapshotArchives` when no full
/// snapshot archive exists.
#[allow(clippy::too_many_arguments)]
pub fn bank_from_latest_snapshot_archives(
    bank_snapshots_dir: impl AsRef<Path>,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    account_paths: &[PathBuf],
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&Builtins>,
    account_secondary_indexes: AccountSecondaryIndexes,
    accounts_db_caching_enabled: bool,
    limit_load_slot_count_from_snapshot: Option<usize>,
    shrink_ratio: AccountShrinkThreshold,
    test_hash_calculation: bool,
    accounts_db_skip_shrink: bool,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: &Arc<AtomicBool>,
) -> Result<(
    Bank,
    FullSnapshotArchiveInfo,
    Option<IncrementalSnapshotArchiveInfo>,
)> {
    let full_snapshot_archive_info =
        get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir)
            .ok_or(SnapshotError::NoSnapshotArchives)?;

    // Only incremental snapshots based on this exact full snapshot slot are candidates.
    let incremental_snapshot_archive_info = get_highest_incremental_snapshot_archive_info(
        &incremental_snapshot_archives_dir,
        full_snapshot_archive_info.slot(),
    );

    info!(
        "Loading bank from full snapshot: {}, and incremental snapshot: {:?}",
        full_snapshot_archive_info.path().display(),
        incremental_snapshot_archive_info
            .as_ref()
            .map(
                |incremental_snapshot_archive_info| incremental_snapshot_archive_info
                    .path()
                    .display()
            )
    );

    let (bank, timings) = bank_from_snapshot_archives(
        account_paths,
        bank_snapshots_dir.as_ref(),
        &full_snapshot_archive_info,
        incremental_snapshot_archive_info.as_ref(),
        genesis_config,
        runtime_config,
        debug_keys,
        additional_builtins,
        account_secondary_indexes,
        accounts_db_caching_enabled,
        limit_load_slot_count_from_snapshot,
        shrink_ratio,
        test_hash_calculation,
        accounts_db_skip_shrink,
        verify_index,
        accounts_db_config,
        accounts_update_notifier,
        exit,
    )?;

    datapoint_info!(
        "bank_from_snapshot_archives",
        (
            "full_snapshot_untar_us",
            timings.full_snapshot_untar_us,
            i64
        ),
        (
            "incremental_snapshot_untar_us",
            timings.incremental_snapshot_untar_us,
            i64
        ),
        (
            "rebuild_bank_from_snapshots_us",
            timings.rebuild_bank_from_snapshots_us,
            i64
        ),
        (
            "verify_snapshot_bank_us",
            timings.verify_snapshot_bank_us,
            i64
        ),
    );

    Ok((
        bank,
        full_snapshot_archive_info,
        incremental_snapshot_archive_info,
    ))
}
|
|
|
|
|
|
|
|
/// Check to make sure the deserialized bank's slot and hash matches the snapshot archive's slot
|
|
|
|
/// and hash
|
|
|
|
fn verify_bank_against_expected_slot_hash(
|
|
|
|
bank: &Bank,
|
|
|
|
expected_slot: Slot,
|
2022-10-31 11:28:35 -07:00
|
|
|
expected_hash: SnapshotHash,
|
2021-08-06 18:16:06 -07:00
|
|
|
) -> Result<()> {
|
|
|
|
let bank_slot = bank.slot();
|
2022-10-31 11:28:35 -07:00
|
|
|
let bank_hash = bank.get_snapshot_hash();
|
2021-08-06 18:16:06 -07:00
|
|
|
|
|
|
|
if bank_slot != expected_slot || bank_hash != expected_hash {
|
|
|
|
return Err(SnapshotError::MismatchedSlotHash(
|
|
|
|
(bank_slot, bank_hash),
|
|
|
|
(expected_slot, expected_hash),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2022-08-29 11:17:27 -07:00
|
|
|
/// Spawns a thread for unpacking a snapshot
|
|
|
|
fn spawn_unpack_snapshot_thread(
|
|
|
|
file_sender: Sender<PathBuf>,
|
|
|
|
account_paths: Arc<Vec<PathBuf>>,
|
|
|
|
ledger_dir: Arc<PathBuf>,
|
|
|
|
mut archive: Archive<SharedBufferReader>,
|
|
|
|
parallel_selector: Option<ParallelSelector>,
|
|
|
|
thread_index: usize,
|
|
|
|
) -> JoinHandle<()> {
|
|
|
|
Builder::new()
|
2022-09-26 12:51:31 -07:00
|
|
|
.name(format!("solUnpkSnpsht{thread_index:02}"))
|
2022-08-29 11:17:27 -07:00
|
|
|
.spawn(move || {
|
|
|
|
streaming_unpack_snapshot(
|
|
|
|
&mut archive,
|
|
|
|
ledger_dir.as_path(),
|
|
|
|
&account_paths,
|
|
|
|
parallel_selector,
|
|
|
|
&file_sender,
|
|
|
|
)
|
|
|
|
.unwrap();
|
|
|
|
})
|
|
|
|
.unwrap()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Streams unpacked files across channel
///
/// Spawns `num_threads` unpack threads, each reading the same decompressed archive through
/// its own `SharedBufferReader` and unpacking only its share of the entries (selected by a
/// `ParallelSelector`). Unpacked file paths are reported over `file_sender`. Returns the
/// spawned threads' join handles.
fn streaming_unarchive_snapshot(
    file_sender: Sender<PathBuf>,
    account_paths: Vec<PathBuf>,
    ledger_dir: PathBuf,
    snapshot_archive_path: PathBuf,
    archive_format: ArchiveFormat,
    num_threads: usize,
) -> Vec<JoinHandle<()>> {
    let account_paths = Arc::new(account_paths);
    let ledger_dir = Arc::new(ledger_dir);
    let shared_buffer = untar_snapshot_create_shared_buffer(&snapshot_archive_path, archive_format);

    // All shared buffer readers need to be created before the threads are spawned
    #[allow(clippy::needless_collect)]
    let archives: Vec<_> = (0..num_threads)
        .map(|_| {
            let reader = SharedBufferReader::new(&shared_buffer);
            Archive::new(reader)
        })
        .collect();

    archives
        .into_iter()
        .enumerate()
        .map(|(thread_index, archive)| {
            // Each thread processes entries where `index % divisions == thread_index`.
            let parallel_selector = Some(ParallelSelector {
                index: thread_index,
                divisions: num_threads,
            });

            spawn_unpack_snapshot_thread(
                file_sender.clone(),
                account_paths.clone(),
                ledger_dir.clone(),
                archive,
                parallel_selector,
                thread_index,
            )
        })
        .collect()
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Perform the common tasks when unarchiving a snapshot. Handles creating the temporary
/// directories, untaring, reading the version file, and then returning those fields plus the
/// rebuilt storage
fn unarchive_snapshot<P, Q>(
    bank_snapshots_dir: P,
    unpacked_snapshots_dir_prefix: &'static str,
    snapshot_archive_path: Q,
    measure_name: &'static str,
    account_paths: &[PathBuf],
    archive_format: ArchiveFormat,
    parallel_divisions: usize,
    next_append_vec_id: Arc<AtomicU32>,
) -> Result<UnarchivedSnapshot>
where
    P: AsRef<Path>,
    Q: AsRef<Path>,
{
    // Unpack into a fresh temp dir under bank_snapshots_dir; it is owned by the returned
    // `UnarchivedSnapshot` (and cleaned up when that is dropped).
    let unpack_dir = tempfile::Builder::new()
        .prefix(unpacked_snapshots_dir_prefix)
        .tempdir_in(bank_snapshots_dir)?;
    let unpacked_snapshots_dir = unpack_dir.path().join("snapshots");

    // Unpack threads send each extracted file's path down this channel; the rebuilder
    // consumes them concurrently from the other end.
    let (file_sender, file_receiver) = crossbeam_channel::unbounded();
    streaming_unarchive_snapshot(
        file_sender,
        account_paths.to_vec(),
        unpack_dir.path().to_path_buf(),
        snapshot_archive_path.as_ref().to_path_buf(),
        archive_format,
        parallel_divisions,
    );

    // Give the rebuilder the physical cores not already used by the untar readers,
    // but always at least one thread.
    let num_rebuilder_threads = num_cpus::get_physical()
        .saturating_sub(parallel_divisions)
        .max(1);
    let (version_and_storages, measure_untar) = measure!(
        SnapshotStorageRebuilder::rebuild_storage(
            file_receiver,
            num_rebuilder_threads,
            next_append_vec_id
        )?,
        measure_name
    );
    info!("{}", measure_untar);

    let RebuiltSnapshotStorage {
        snapshot_version,
        storage,
    } = version_and_storages;
    Ok(UnarchivedSnapshot {
        unpack_dir,
        storage,
        unpacked_snapshots_dir_and_version: UnpackedSnapshotsDirAndVersion {
            unpacked_snapshots_dir,
            snapshot_version,
        },
        measure_untar,
    })
}
|
|
|
|
|
2021-12-17 10:27:54 -08:00
|
|
|
/// Reads the `snapshot_version` from a file. Before opening the file, its size
|
|
|
|
/// is compared to `MAX_SNAPSHOT_VERSION_FILE_SIZE`. If the size exceeds this
|
|
|
|
/// threshold, it is not opened and an error is returned.
|
|
|
|
fn snapshot_version_from_file(path: impl AsRef<Path>) -> Result<String> {
|
|
|
|
// Check file size.
|
|
|
|
let file_size = fs::metadata(&path)?.len();
|
|
|
|
if file_size > MAX_SNAPSHOT_VERSION_FILE_SIZE {
|
|
|
|
let error_message = format!(
|
|
|
|
"snapshot version file too large: {} has {} bytes (max size is {} bytes)",
|
|
|
|
path.as_ref().display(),
|
|
|
|
file_size,
|
|
|
|
MAX_SNAPSHOT_VERSION_FILE_SIZE,
|
|
|
|
);
|
|
|
|
return Err(get_io_error(&error_message));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read snapshot_version from file.
|
|
|
|
let mut snapshot_version = String::new();
|
|
|
|
File::open(path).and_then(|mut f| f.read_to_string(&mut snapshot_version))?;
|
|
|
|
Ok(snapshot_version.trim().to_string())
|
|
|
|
}
|
|
|
|
|
2021-08-06 18:16:06 -07:00
|
|
|
/// Check if an incremental snapshot is compatible with a full snapshot. This is done by checking
|
|
|
|
/// if the incremental snapshot's base slot is the same as the full snapshot's slot.
|
|
|
|
fn check_are_snapshots_compatible(
|
|
|
|
full_snapshot_archive_info: &FullSnapshotArchiveInfo,
|
|
|
|
incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>,
|
|
|
|
) -> Result<()> {
|
|
|
|
if incremental_snapshot_archive_info.is_none() {
|
|
|
|
return Ok(());
|
|
|
|
}
|
2021-07-22 12:40:37 -07:00
|
|
|
|
2021-08-06 18:16:06 -07:00
|
|
|
let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap();
|
2021-07-22 12:40:37 -07:00
|
|
|
|
2021-08-06 18:16:06 -07:00
|
|
|
(full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot())
|
2022-08-22 18:01:03 -07:00
|
|
|
.then_some(())
|
2021-08-06 18:16:06 -07:00
|
|
|
.ok_or_else(|| {
|
|
|
|
SnapshotError::MismatchedBaseSlot(
|
|
|
|
full_snapshot_archive_info.slot(),
|
|
|
|
incremental_snapshot_archive_info.base_slot(),
|
|
|
|
)
|
|
|
|
})
|
2021-07-22 12:40:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Get the `&str` from a `&Path`
|
2021-08-08 05:57:06 -07:00
|
|
|
pub fn path_to_file_name_str(path: &Path) -> Result<&str> {
|
2021-07-22 12:40:37 -07:00
|
|
|
path.file_name()
|
|
|
|
.ok_or_else(|| SnapshotError::PathToFileNameError(path.to_path_buf()))?
|
|
|
|
.to_str()
|
|
|
|
.ok_or_else(|| SnapshotError::FileNameToStrError(path.to_path_buf()))
|
|
|
|
}
|
|
|
|
|
2022-03-14 12:03:59 -07:00
|
|
|
pub fn build_snapshot_archives_remote_dir(snapshot_archives_dir: impl AsRef<Path>) -> PathBuf {
|
|
|
|
snapshot_archives_dir
|
|
|
|
.as_ref()
|
|
|
|
.join(SNAPSHOT_ARCHIVE_DOWNLOAD_DIR)
|
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Build the full snapshot archive path from its components: the snapshot archives directory, the
|
2021-07-01 10:20:56 -07:00
|
|
|
/// snapshot slot, the accounts hash, and the archive format.
|
2021-07-22 12:40:37 -07:00
|
|
|
pub fn build_full_snapshot_archive_path(
|
2022-05-10 13:37:41 -07:00
|
|
|
full_snapshot_archives_dir: impl AsRef<Path>,
|
2021-07-01 10:20:56 -07:00
|
|
|
slot: Slot,
|
2022-10-31 11:28:35 -07:00
|
|
|
hash: &SnapshotHash,
|
2021-01-21 18:34:51 -08:00
|
|
|
archive_format: ArchiveFormat,
|
2020-02-24 12:37:14 -08:00
|
|
|
) -> PathBuf {
|
2022-05-10 13:37:41 -07:00
|
|
|
full_snapshot_archives_dir.as_ref().join(format!(
|
2021-05-27 10:00:27 -07:00
|
|
|
"snapshot-{}-{}.{}",
|
2021-07-01 10:20:56 -07:00
|
|
|
slot,
|
2022-10-31 11:28:35 -07:00
|
|
|
hash.0,
|
2022-03-09 14:09:34 -08:00
|
|
|
archive_format.extension(),
|
2020-02-24 12:37:14 -08:00
|
|
|
))
|
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Build the incremental snapshot archive path from its components: the snapshot archives
|
|
|
|
/// directory, the snapshot base slot, the snapshot slot, the accounts hash, and the archive
|
|
|
|
/// format.
|
|
|
|
pub fn build_incremental_snapshot_archive_path(
|
2022-05-10 13:37:41 -07:00
|
|
|
incremental_snapshot_archives_dir: impl AsRef<Path>,
|
2021-07-22 12:40:37 -07:00
|
|
|
base_slot: Slot,
|
|
|
|
slot: Slot,
|
2022-10-31 11:28:35 -07:00
|
|
|
hash: &SnapshotHash,
|
2021-07-22 12:40:37 -07:00
|
|
|
archive_format: ArchiveFormat,
|
|
|
|
) -> PathBuf {
|
2022-05-10 13:37:41 -07:00
|
|
|
incremental_snapshot_archives_dir.as_ref().join(format!(
|
2021-07-22 12:40:37 -07:00
|
|
|
"incremental-snapshot-{}-{}-{}.{}",
|
|
|
|
base_slot,
|
|
|
|
slot,
|
2022-10-31 11:28:35 -07:00
|
|
|
hash.0,
|
2022-03-09 14:09:34 -08:00
|
|
|
archive_format.extension(),
|
2021-07-22 12:40:37 -07:00
|
|
|
))
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Parse a full snapshot archive filename into its Slot, Hash, and Archive Format
|
2022-03-02 09:52:20 -08:00
|
|
|
pub(crate) fn parse_full_snapshot_archive_filename(
|
2021-07-22 12:40:37 -07:00
|
|
|
archive_filename: &str,
|
2022-10-31 11:28:35 -07:00
|
|
|
) -> Result<(Slot, SnapshotHash, ArchiveFormat)> {
|
2021-07-22 12:40:37 -07:00
|
|
|
lazy_static! {
|
|
|
|
static ref RE: Regex = Regex::new(FULL_SNAPSHOT_ARCHIVE_FILENAME_REGEX).unwrap();
|
|
|
|
}
|
2021-07-01 10:20:56 -07:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
let do_parse = || {
|
|
|
|
RE.captures(archive_filename).and_then(|captures| {
|
|
|
|
let slot = captures
|
|
|
|
.name("slot")
|
|
|
|
.map(|x| x.as_str().parse::<Slot>())?
|
|
|
|
.ok()?;
|
|
|
|
let hash = captures
|
|
|
|
.name("hash")
|
|
|
|
.map(|x| x.as_str().parse::<Hash>())?
|
|
|
|
.ok()?;
|
|
|
|
let archive_format = captures
|
|
|
|
.name("ext")
|
2022-03-09 14:09:34 -08:00
|
|
|
.map(|x| x.as_str().parse::<ArchiveFormat>())?
|
|
|
|
.ok()?;
|
2021-07-22 12:40:37 -07:00
|
|
|
|
2022-10-31 11:28:35 -07:00
|
|
|
Some((slot, SnapshotHash(hash), archive_format))
|
2021-07-22 12:40:37 -07:00
|
|
|
})
|
|
|
|
};
|
2021-07-01 10:20:56 -07:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
do_parse().ok_or_else(|| {
|
|
|
|
SnapshotError::ParseSnapshotArchiveFileNameError(archive_filename.to_string())
|
2021-07-01 10:20:56 -07:00
|
|
|
})
|
2020-02-24 12:37:14 -08:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Parse an incremental snapshot archive filename into its base Slot, actual Slot, Hash, and Archive Format
|
2022-03-02 09:52:20 -08:00
|
|
|
pub(crate) fn parse_incremental_snapshot_archive_filename(
|
2021-07-22 12:40:37 -07:00
|
|
|
archive_filename: &str,
|
2022-10-31 11:28:35 -07:00
|
|
|
) -> Result<(Slot, Slot, SnapshotHash, ArchiveFormat)> {
|
2021-07-22 12:40:37 -07:00
|
|
|
lazy_static! {
|
|
|
|
static ref RE: Regex = Regex::new(INCREMENTAL_SNAPSHOT_ARCHIVE_FILENAME_REGEX).unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
let do_parse = || {
|
|
|
|
RE.captures(archive_filename).and_then(|captures| {
|
|
|
|
let base_slot = captures
|
|
|
|
.name("base")
|
|
|
|
.map(|x| x.as_str().parse::<Slot>())?
|
|
|
|
.ok()?;
|
|
|
|
let slot = captures
|
|
|
|
.name("slot")
|
|
|
|
.map(|x| x.as_str().parse::<Slot>())?
|
|
|
|
.ok()?;
|
|
|
|
let hash = captures
|
|
|
|
.name("hash")
|
|
|
|
.map(|x| x.as_str().parse::<Hash>())?
|
|
|
|
.ok()?;
|
|
|
|
let archive_format = captures
|
|
|
|
.name("ext")
|
2022-03-09 14:09:34 -08:00
|
|
|
.map(|x| x.as_str().parse::<ArchiveFormat>())?
|
|
|
|
.ok()?;
|
2021-07-22 12:40:37 -07:00
|
|
|
|
2022-10-31 11:28:35 -07:00
|
|
|
Some((base_slot, slot, SnapshotHash(hash), archive_format))
|
2021-07-22 12:40:37 -07:00
|
|
|
})
|
|
|
|
};
|
|
|
|
|
|
|
|
do_parse().ok_or_else(|| {
|
|
|
|
SnapshotError::ParseSnapshotArchiveFileNameError(archive_filename.to_string())
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2022-03-14 12:03:59 -07:00
|
|
|
/// Walk down the snapshot archive to collect snapshot archive file info
|
|
|
|
fn get_snapshot_archives<T, F>(snapshot_archives_dir: &Path, cb: F) -> Vec<T>
|
|
|
|
where
|
|
|
|
F: Fn(PathBuf) -> Result<T>,
|
|
|
|
{
|
|
|
|
let walk_dir = |dir: &Path| -> Vec<T> {
|
|
|
|
let entry_iter = fs::read_dir(dir);
|
|
|
|
match entry_iter {
|
|
|
|
Err(err) => {
|
|
|
|
info!(
|
|
|
|
"Unable to read snapshot archives directory: err: {}, path: {}",
|
|
|
|
err,
|
|
|
|
dir.display()
|
|
|
|
);
|
|
|
|
vec![]
|
|
|
|
}
|
|
|
|
Ok(entries) => entries
|
|
|
|
.filter_map(|entry| entry.map_or(None, |entry| cb(entry.path()).ok()))
|
|
|
|
.collect(),
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let mut ret = walk_dir(snapshot_archives_dir);
|
2022-03-15 06:56:22 -07:00
|
|
|
let remote_dir = build_snapshot_archives_remote_dir(snapshot_archives_dir);
|
|
|
|
if remote_dir.exists() {
|
|
|
|
ret.append(&mut walk_dir(remote_dir.as_ref()));
|
|
|
|
}
|
2022-03-14 12:03:59 -07:00
|
|
|
ret
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Get a list of the full snapshot archives from a directory
|
2022-05-10 13:37:41 -07:00
|
|
|
pub fn get_full_snapshot_archives(
|
|
|
|
full_snapshot_archives_dir: impl AsRef<Path>,
|
|
|
|
) -> Vec<FullSnapshotArchiveInfo> {
|
2022-03-14 12:03:59 -07:00
|
|
|
get_snapshot_archives(
|
2022-05-10 13:37:41 -07:00
|
|
|
full_snapshot_archives_dir.as_ref(),
|
2022-03-14 12:03:59 -07:00
|
|
|
FullSnapshotArchiveInfo::new_from_path,
|
|
|
|
)
|
2021-07-22 12:40:37 -07:00
|
|
|
}
|
|
|
|
|
2022-03-14 12:03:59 -07:00
|
|
|
/// Get a list of the incremental snapshot archives from a directory
|
2022-05-10 13:37:41 -07:00
|
|
|
pub fn get_incremental_snapshot_archives(
|
|
|
|
incremental_snapshot_archives_dir: impl AsRef<Path>,
|
|
|
|
) -> Vec<IncrementalSnapshotArchiveInfo> {
|
2022-03-14 12:03:59 -07:00
|
|
|
get_snapshot_archives(
|
2022-05-10 13:37:41 -07:00
|
|
|
incremental_snapshot_archives_dir.as_ref(),
|
2022-03-14 12:03:59 -07:00
|
|
|
IncrementalSnapshotArchiveInfo::new_from_path,
|
|
|
|
)
|
2020-02-24 12:37:14 -08:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Get the highest slot of the full snapshot archives in a directory
|
2022-05-10 13:37:41 -07:00
|
|
|
pub fn get_highest_full_snapshot_archive_slot(
|
|
|
|
full_snapshot_archives_dir: impl AsRef<Path>,
|
|
|
|
) -> Option<Slot> {
|
|
|
|
get_highest_full_snapshot_archive_info(full_snapshot_archives_dir)
|
2021-08-06 18:16:06 -07:00
|
|
|
.map(|full_snapshot_archive_info| full_snapshot_archive_info.slot())
|
2021-07-01 10:20:56 -07:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Get the highest slot of the incremental snapshot archives in a directory, for a given full
|
|
|
|
/// snapshot slot
|
2022-05-10 13:37:41 -07:00
|
|
|
pub fn get_highest_incremental_snapshot_archive_slot(
|
|
|
|
incremental_snapshot_archives_dir: impl AsRef<Path>,
|
2021-07-22 12:40:37 -07:00
|
|
|
full_snapshot_slot: Slot,
|
|
|
|
) -> Option<Slot> {
|
2022-05-10 13:37:41 -07:00
|
|
|
get_highest_incremental_snapshot_archive_info(
|
|
|
|
incremental_snapshot_archives_dir,
|
|
|
|
full_snapshot_slot,
|
|
|
|
)
|
|
|
|
.map(|incremental_snapshot_archive_info| incremental_snapshot_archive_info.slot())
|
2021-07-01 10:20:56 -07:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Get the path (and metadata) for the full snapshot archive with the highest slot in a directory
|
2022-05-10 13:37:41 -07:00
|
|
|
pub fn get_highest_full_snapshot_archive_info(
|
|
|
|
full_snapshot_archives_dir: impl AsRef<Path>,
|
|
|
|
) -> Option<FullSnapshotArchiveInfo> {
|
|
|
|
let mut full_snapshot_archives = get_full_snapshot_archives(full_snapshot_archives_dir);
|
2021-07-22 12:40:37 -07:00
|
|
|
full_snapshot_archives.sort_unstable();
|
|
|
|
full_snapshot_archives.into_iter().rev().next()
|
2021-07-01 10:20:56 -07:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Get the path for the incremental snapshot archive with the highest slot, for a given full
|
|
|
|
/// snapshot slot, in a directory
|
2022-05-10 13:37:41 -07:00
|
|
|
pub fn get_highest_incremental_snapshot_archive_info(
|
|
|
|
incremental_snapshot_archives_dir: impl AsRef<Path>,
|
2021-07-22 12:40:37 -07:00
|
|
|
full_snapshot_slot: Slot,
|
2022-05-10 13:37:41 -07:00
|
|
|
) -> Option<IncrementalSnapshotArchiveInfo> {
|
2021-07-22 12:40:37 -07:00
|
|
|
// Since we want to filter down to only the incremental snapshot archives that have the same
|
|
|
|
// full snapshot slot as the value passed in, perform the filtering before sorting to avoid
|
|
|
|
// doing unnecessary work.
|
|
|
|
let mut incremental_snapshot_archives =
|
2022-05-10 13:37:41 -07:00
|
|
|
get_incremental_snapshot_archives(incremental_snapshot_archives_dir)
|
2021-07-22 12:40:37 -07:00
|
|
|
.into_iter()
|
|
|
|
.filter(|incremental_snapshot_archive_info| {
|
2021-08-06 18:16:06 -07:00
|
|
|
incremental_snapshot_archive_info.base_slot() == full_snapshot_slot
|
2021-07-22 12:40:37 -07:00
|
|
|
})
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
incremental_snapshot_archives.sort_unstable();
|
|
|
|
incremental_snapshot_archives.into_iter().rev().next()
|
2019-08-13 17:20:14 -07:00
|
|
|
}
|
|
|
|
|
2022-05-10 13:37:41 -07:00
|
|
|
/// Purge over-limit snapshot archives from disk.
///
/// Full snapshot archives: keeps the newest `maximum_full_snapshot_archives_to_retain`
/// (always at least one), removes the rest.
///
/// Incremental snapshot archives: for the base slot matching the highest retained full
/// snapshot, keeps the newest `maximum_incremental_snapshot_archives_to_retain`; for other
/// retained full snapshot slots, keeps one; for base slots with no retained full snapshot,
/// keeps none.
///
/// Removal failures are logged (info) and otherwise ignored.
pub fn purge_old_snapshot_archives(
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    maximum_full_snapshot_archives_to_retain: usize,
    maximum_incremental_snapshot_archives_to_retain: usize,
) {
    info!(
        "Purging old full snapshot archives in {}, retaining up to {} full snapshots",
        full_snapshot_archives_dir.as_ref().display(),
        maximum_full_snapshot_archives_to_retain
    );

    // Sort newest-first so the retained archives are the prefix of the vec.
    let mut full_snapshot_archives = get_full_snapshot_archives(&full_snapshot_archives_dir);
    full_snapshot_archives.sort_unstable();
    full_snapshot_archives.reverse();

    let num_to_retain = full_snapshot_archives.len().min(
        maximum_full_snapshot_archives_to_retain
            .max(1 /* Always keep at least one full snapshot */),
    );
    trace!(
        "There are {} full snapshot archives, retaining {}",
        full_snapshot_archives.len(),
        num_to_retain,
    );

    // `split_at` would panic on an empty vec, so special-case it to two empty slices.
    let (full_snapshot_archives_to_retain, full_snapshot_archives_to_remove) =
        if full_snapshot_archives.is_empty() {
            None
        } else {
            Some(full_snapshot_archives.split_at(num_to_retain))
        }
        .unwrap_or_default();

    // Slots of retained full snapshots; used below to decide which incremental
    // archives may stay.
    let retained_full_snapshot_slots = full_snapshot_archives_to_retain
        .iter()
        .map(|ai| ai.slot())
        .collect::<HashSet<_>>();

    // Best-effort removal: failures are logged and skipped.
    fn remove_archives<T: SnapshotArchiveInfoGetter>(archives: &[T]) {
        for path in archives.iter().map(|a| a.path()) {
            trace!("Removing snapshot archive: {}", path.display());
            fs::remove_file(path)
                .unwrap_or_else(|err| info!("Failed to remove {}: {}", path.display(), err));
        }
    }
    remove_archives(full_snapshot_archives_to_remove);

    info!(
        "Purging old incremental snapshot archives in {}, retaining up to {} incremental snapshots",
        incremental_snapshot_archives_dir.as_ref().display(),
        maximum_incremental_snapshot_archives_to_retain
    );
    // Group incremental archives by their full-snapshot base slot.
    let mut incremental_snapshot_archives_by_base_slot = HashMap::<Slot, Vec<_>>::new();
    for incremental_snapshot_archive in
        get_incremental_snapshot_archives(&incremental_snapshot_archives_dir)
    {
        incremental_snapshot_archives_by_base_slot
            .entry(incremental_snapshot_archive.base_slot())
            .or_default()
            .push(incremental_snapshot_archive)
    }

    let highest_full_snapshot_slot = retained_full_snapshot_slots.iter().max().copied();
    for (base_slot, mut incremental_snapshot_archives) in incremental_snapshot_archives_by_base_slot
    {
        incremental_snapshot_archives.sort_unstable();
        let num_to_retain = if Some(base_slot) == highest_full_snapshot_slot {
            maximum_incremental_snapshot_archives_to_retain
        } else {
            // Keep a single incremental archive per still-retained full snapshot (1),
            // and none for a purged base slot (0).
            usize::from(retained_full_snapshot_slots.contains(&base_slot))
        };
        trace!(
            "There are {} incremental snapshot archives for base slot {}, removing {} of them",
            incremental_snapshot_archives.len(),
            base_slot,
            incremental_snapshot_archives
                .len()
                .saturating_sub(num_to_retain),
        );

        // After truncation the vec holds only the archives slated for removal (the
        // oldest ones, since the vec is sorted ascending).
        incremental_snapshot_archives.truncate(
            incremental_snapshot_archives
                .len()
                .saturating_sub(num_to_retain),
        );
        remove_archives(&incremental_snapshot_archives);
    }
}
|
|
|
|
|
2022-07-19 13:30:30 -07:00
|
|
|
fn unpack_snapshot_local(
|
|
|
|
shared_buffer: SharedBuffer,
|
2021-06-29 16:26:15 -07:00
|
|
|
ledger_dir: &Path,
|
|
|
|
account_paths: &[PathBuf],
|
2022-07-19 11:15:33 -07:00
|
|
|
parallel_divisions: usize,
|
2021-06-29 16:26:15 -07:00
|
|
|
) -> Result<UnpackedAppendVecMap> {
|
2022-07-19 11:15:33 -07:00
|
|
|
assert!(parallel_divisions > 0);
|
2021-07-08 11:44:47 -07:00
|
|
|
|
|
|
|
// allocate all readers before any readers start reading
|
2022-07-19 11:15:33 -07:00
|
|
|
let readers = (0..parallel_divisions)
|
2021-06-29 16:26:15 -07:00
|
|
|
.into_iter()
|
2021-07-08 11:44:47 -07:00
|
|
|
.map(|_| SharedBufferReader::new(&shared_buffer))
|
2021-06-29 16:26:15 -07:00
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
2022-07-19 11:15:33 -07:00
|
|
|
// create 'parallel_divisions' # of parallel workers, each responsible for 1/parallel_divisions of all the files to extract.
|
2021-06-29 16:26:15 -07:00
|
|
|
let all_unpacked_append_vec_map = readers
|
|
|
|
.into_par_iter()
|
|
|
|
.enumerate()
|
|
|
|
.map(|(index, reader)| {
|
|
|
|
let parallel_selector = Some(ParallelSelector {
|
|
|
|
index,
|
2022-07-19 11:15:33 -07:00
|
|
|
divisions: parallel_divisions,
|
2021-06-29 16:26:15 -07:00
|
|
|
});
|
|
|
|
let mut archive = Archive::new(reader);
|
|
|
|
unpack_snapshot(&mut archive, ledger_dir, account_paths, parallel_selector)
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>();
|
2022-05-16 06:49:17 -07:00
|
|
|
|
2021-06-29 16:26:15 -07:00
|
|
|
let mut unpacked_append_vec_map = UnpackedAppendVecMap::new();
|
|
|
|
for h in all_unpacked_append_vec_map {
|
|
|
|
unpacked_append_vec_map.extend(h?);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(unpacked_append_vec_map)
|
|
|
|
}
|
|
|
|
|
2022-07-19 13:30:30 -07:00
|
|
|
/// Open `snapshot_tar` and wrap it in the decompressor matching `archive_format`, returning a
/// `SharedBuffer` that multiple `SharedBufferReader`s can consume concurrently.
///
/// # Panics
/// Panics if the file cannot be opened, or if the zstd/lz4 decoder fails to construct.
fn untar_snapshot_create_shared_buffer(
    snapshot_tar: &Path,
    archive_format: ArchiveFormat,
) -> SharedBuffer {
    let open_file = || File::open(snapshot_tar).unwrap();
    match archive_format {
        ArchiveFormat::TarBzip2 => SharedBuffer::new(BzDecoder::new(BufReader::new(open_file()))),
        ArchiveFormat::TarGzip => SharedBuffer::new(GzDecoder::new(BufReader::new(open_file()))),
        ArchiveFormat::TarZstd => SharedBuffer::new(
            zstd::stream::read::Decoder::new(BufReader::new(open_file())).unwrap(),
        ),
        ArchiveFormat::TarLz4 => {
            SharedBuffer::new(lz4::Decoder::new(BufReader::new(open_file())).unwrap())
        }
        // Uncompressed tar: just buffer the raw file.
        ArchiveFormat::Tar => SharedBuffer::new(BufReader::new(open_file())),
    }
}
|
|
|
|
|
2022-05-16 06:49:17 -07:00
|
|
|
fn untar_snapshot_in<P: AsRef<Path>>(
|
|
|
|
snapshot_tar: P,
|
|
|
|
unpack_dir: &Path,
|
|
|
|
account_paths: &[PathBuf],
|
|
|
|
archive_format: ArchiveFormat,
|
|
|
|
parallel_divisions: usize,
|
|
|
|
) -> Result<UnpackedAppendVecMap> {
|
2022-07-19 16:15:41 -07:00
|
|
|
let shared_buffer = untar_snapshot_create_shared_buffer(snapshot_tar.as_ref(), archive_format);
|
2022-07-19 13:30:30 -07:00
|
|
|
unpack_snapshot_local(shared_buffer, unpack_dir, account_paths, parallel_divisions)
|
2022-05-16 06:49:17 -07:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Sanity-check an unpacked snapshots directory: it must contain exactly one post-processed
/// bank snapshot.  Returns that snapshot's info together with the snapshot version.
///
/// Errors (as `SnapshotError::Io`) if more than one, or zero, bank snapshots are found.
fn verify_unpacked_snapshots_dir_and_version(
    unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion,
) -> Result<(SnapshotVersion, BankSnapshotInfo)> {
    info!(
        "snapshot version: {}",
        &unpacked_snapshots_dir_and_version.snapshot_version
    );

    let snapshot_version = unpacked_snapshots_dir_and_version.snapshot_version;
    let mut bank_snapshots =
        get_bank_snapshots_post(&unpacked_snapshots_dir_and_version.unpacked_snapshots_dir);
    // An unpacked archive must contain exactly one bank snapshot.
    if bank_snapshots.len() > 1 {
        return Err(get_io_error("invalid snapshot format"));
    }
    let root_paths = bank_snapshots
        .pop()
        .ok_or_else(|| get_io_error("No snapshots found in snapshots directory"))?;
    Ok((snapshot_version, root_paths))
}
|
2019-09-25 13:42:19 -07:00
|
|
|
|
2022-07-06 15:30:30 -07:00
|
|
|
/// Deserialize only the bank fields (not the full bank) from the unpacked full snapshot, plus
/// the optional unpacked incremental snapshot.  When an incremental snapshot is present, its
/// snapshot version determines which deserializer is used.
fn bank_fields_from_snapshots(
    full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion,
    incremental_snapshot_unpacked_snapshots_dir_and_version: Option<
        &UnpackedSnapshotsDirAndVersion,
    >,
) -> Result<BankFieldsToDeserialize> {
    let (full_snapshot_version, full_snapshot_root_paths) =
        verify_unpacked_snapshots_dir_and_version(
            full_snapshot_unpacked_snapshots_dir_and_version,
        )?;
    // Verify the incremental snapshot too, when one was provided.
    let (incremental_snapshot_version, incremental_snapshot_root_paths) =
        if let Some(snapshot_unpacked_snapshots_dir_and_version) =
            incremental_snapshot_unpacked_snapshots_dir_and_version
        {
            let (snapshot_version, bank_snapshot_info) = verify_unpacked_snapshots_dir_and_version(
                snapshot_unpacked_snapshots_dir_and_version,
            )?;
            (Some(snapshot_version), Some(bank_snapshot_info))
        } else {
            (None, None)
        };
    info!(
        "Loading bank from full snapshot {} and incremental snapshot {:?}",
        full_snapshot_root_paths.snapshot_path.display(),
        incremental_snapshot_root_paths
            .as_ref()
            .map(|paths| paths.snapshot_path.display()),
    );

    let snapshot_root_paths = SnapshotRootPaths {
        full_snapshot_root_file_path: full_snapshot_root_paths.snapshot_path,
        incremental_snapshot_root_file_path: incremental_snapshot_root_paths
            .map(|root_paths| root_paths.snapshot_path),
    };

    deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| {
        Ok(
            // The incremental snapshot's version takes precedence when present.
            match incremental_snapshot_version.unwrap_or(full_snapshot_version) {
                SnapshotVersion::V1_2_0 => fields_from_streams(SerdeStyle::Newer, snapshot_streams)
                    .map(|(bank_fields, _accountsdb_fields)| bank_fields),
            }?,
        )
    })
}
|
|
|
|
|
2021-06-16 09:01:52 -07:00
|
|
|
/// Rebuild a `Bank` from unpacked snapshot directories: deserialize the bank (full snapshot,
/// plus incremental when present), rebuild and verify the status cache from the latest
/// snapshot's slot deltas, then return the bank.
#[allow(clippy::too_many_arguments)]
fn rebuild_bank_from_snapshots(
    full_snapshot_unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion,
    incremental_snapshot_unpacked_snapshots_dir_and_version: Option<
        &UnpackedSnapshotsDirAndVersion,
    >,
    account_paths: &[PathBuf],
    storage_and_next_append_vec_id: StorageAndNextAppendVecId,
    genesis_config: &GenesisConfig,
    runtime_config: &RuntimeConfig,
    debug_keys: Option<Arc<HashSet<Pubkey>>>,
    additional_builtins: Option<&Builtins>,
    account_secondary_indexes: AccountSecondaryIndexes,
    accounts_db_caching_enabled: bool,
    limit_load_slot_count_from_snapshot: Option<usize>,
    shrink_ratio: AccountShrinkThreshold,
    verify_index: bool,
    accounts_db_config: Option<AccountsDbConfig>,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
    exit: &Arc<AtomicBool>,
) -> Result<Bank> {
    let (full_snapshot_version, full_snapshot_root_paths) =
        verify_unpacked_snapshots_dir_and_version(
            full_snapshot_unpacked_snapshots_dir_and_version,
        )?;
    // Verify the incremental snapshot too, when one was provided.
    let (incremental_snapshot_version, incremental_snapshot_root_paths) =
        if let Some(snapshot_unpacked_snapshots_dir_and_version) =
            incremental_snapshot_unpacked_snapshots_dir_and_version
        {
            let (snapshot_version, bank_snapshot_info) = verify_unpacked_snapshots_dir_and_version(
                snapshot_unpacked_snapshots_dir_and_version,
            )?;
            (Some(snapshot_version), Some(bank_snapshot_info))
        } else {
            (None, None)
        };
    info!(
        "Loading bank from full snapshot {} and incremental snapshot {:?}",
        full_snapshot_root_paths.snapshot_path.display(),
        incremental_snapshot_root_paths
            .as_ref()
            .map(|paths| paths.snapshot_path.display()),
    );

    let snapshot_root_paths = SnapshotRootPaths {
        full_snapshot_root_file_path: full_snapshot_root_paths.snapshot_path,
        incremental_snapshot_root_file_path: incremental_snapshot_root_paths
            .map(|root_paths| root_paths.snapshot_path),
    };

    // The incremental snapshot's version takes precedence when present.
    let bank = deserialize_snapshot_data_files(&snapshot_root_paths, |snapshot_streams| {
        Ok(
            match incremental_snapshot_version.unwrap_or(full_snapshot_version) {
                SnapshotVersion::V1_2_0 => bank_from_streams(
                    SerdeStyle::Newer,
                    snapshot_streams,
                    account_paths,
                    storage_and_next_append_vec_id,
                    genesis_config,
                    runtime_config,
                    debug_keys,
                    additional_builtins,
                    account_secondary_indexes,
                    accounts_db_caching_enabled,
                    limit_load_slot_count_from_snapshot,
                    shrink_ratio,
                    verify_index,
                    accounts_db_config,
                    accounts_update_notifier,
                    exit,
                ),
            }?,
        )
    })?;

    // The status cache is rebuilt from the latest snapshot. So, if there's an incremental
    // snapshot, use that. Otherwise use the full snapshot.
    let status_cache_path = incremental_snapshot_unpacked_snapshots_dir_and_version
        .map_or_else(
            || {
                full_snapshot_unpacked_snapshots_dir_and_version
                    .unpacked_snapshots_dir
                    .as_path()
            },
            |unpacked_snapshots_dir_and_version| {
                unpacked_snapshots_dir_and_version
                    .unpacked_snapshots_dir
                    .as_path()
            },
        )
        .join(SNAPSHOT_STATUS_CACHE_FILENAME);
    let slot_deltas = deserialize_snapshot_data_file(&status_cache_path, |stream| {
        info!(
            "Rebuilding status cache from {}",
            status_cache_path.display()
        );
        // Bounded, fixed-int bincode decoding of the status cache file.
        let slot_deltas: Vec<BankSlotDelta> = bincode::options()
            .with_limit(MAX_SNAPSHOT_DATA_FILE_SIZE)
            .with_fixint_encoding()
            .allow_trailing_bytes()
            .deserialize_from(stream)?;
        Ok(slot_deltas)
    })?;

    // Reject corrupt/inconsistent slot deltas before installing them.
    verify_slot_deltas(slot_deltas.as_slice(), &bank)?;

    bank.status_cache.write().unwrap().append(&slot_deltas);

    bank.prepare_rewrites_for_hash();

    info!("Loaded bank for slot: {}", bank.slot());
    Ok(bank)
}
|
|
|
|
|
2022-08-18 06:48:58 -07:00
|
|
|
/// Verify that the snapshot's slot deltas are not corrupt/invalid
|
|
|
|
fn verify_slot_deltas(
|
|
|
|
slot_deltas: &[BankSlotDelta],
|
|
|
|
bank: &Bank,
|
|
|
|
) -> std::result::Result<(), VerifySlotDeltasError> {
|
|
|
|
let info = verify_slot_deltas_structural(slot_deltas, bank.slot())?;
|
|
|
|
verify_slot_deltas_with_history(&info.slots, &bank.get_slot_history(), bank.slot())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Verify that the snapshot's slot deltas are not corrupt/invalid
|
|
|
|
/// These checks are simple/structural
|
|
|
|
fn verify_slot_deltas_structural(
|
|
|
|
slot_deltas: &[BankSlotDelta],
|
|
|
|
bank_slot: Slot,
|
|
|
|
) -> std::result::Result<VerifySlotDeltasStructuralInfo, VerifySlotDeltasError> {
|
|
|
|
// there should not be more entries than that status cache's max
|
|
|
|
let num_entries = slot_deltas.len();
|
|
|
|
if num_entries > status_cache::MAX_CACHE_ENTRIES {
|
|
|
|
return Err(VerifySlotDeltasError::TooManyEntries(
|
|
|
|
num_entries,
|
|
|
|
status_cache::MAX_CACHE_ENTRIES,
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut slots_seen_so_far = HashSet::new();
|
|
|
|
for &(slot, is_root, ..) in slot_deltas {
|
|
|
|
// all entries should be roots
|
|
|
|
if !is_root {
|
|
|
|
return Err(VerifySlotDeltasError::SlotIsNotRoot(slot));
|
|
|
|
}
|
|
|
|
|
|
|
|
// all entries should be for slots less than or equal to the bank's slot
|
|
|
|
if slot > bank_slot {
|
|
|
|
return Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot(
|
|
|
|
slot, bank_slot,
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
// there should only be one entry per slot
|
|
|
|
let is_duplicate = !slots_seen_so_far.insert(slot);
|
|
|
|
if is_duplicate {
|
|
|
|
return Err(VerifySlotDeltasError::SlotHasMultipleEntries(slot));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// detect serious logic error for future careless changes. :)
|
|
|
|
assert_eq!(slots_seen_so_far.len(), slot_deltas.len());
|
|
|
|
|
|
|
|
Ok(VerifySlotDeltasStructuralInfo {
|
|
|
|
slots: slots_seen_so_far,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Computed information from `verify_slot_deltas_structural()`, that may be reused/useful later.
/// Produced only after the structural checks pass, so `slots` is guaranteed duplicate-free.
#[derive(Debug, PartialEq, Eq)]
struct VerifySlotDeltasStructuralInfo {
    /// All the slots in the slot deltas (one entry per slot delta)
    slots: HashSet<Slot>,
}
|
|
|
|
|
|
|
|
/// Verify that the snapshot's slot deltas are not corrupt/invalid
/// These checks use the slot history for verification
///
/// Errors with `BadSlotHistory` if the slot history's newest entry doesn't match the bank,
/// `SlotNotFoundInHistory` if the deltas mention an unknown slot, and `SlotNotFoundInDeltas`
/// if a recent rooted slot is missing from the deltas.
fn verify_slot_deltas_with_history(
    slots_from_slot_deltas: &HashSet<Slot>,
    slot_history: &SlotHistory,
    bank_slot: Slot,
) -> std::result::Result<(), VerifySlotDeltasError> {
    // ensure the slot history is valid (as much as possible), since we're using it to verify the
    // slot deltas
    if slot_history.newest() != bank_slot {
        return Err(VerifySlotDeltasError::BadSlotHistory);
    }

    // all slots in the slot deltas should be in the bank's slot history
    let slot_missing_from_history = slots_from_slot_deltas
        .iter()
        .find(|slot| slot_history.check(**slot) != Check::Found);
    if let Some(slot) = slot_missing_from_history {
        return Err(VerifySlotDeltasError::SlotNotFoundInHistory(*slot));
    }

    // all slots in the history should be in the slot deltas (up to MAX_CACHE_ENTRIES)
    // this ensures nothing was removed from the status cache
    //
    // go through the slot history and make sure there's an entry for each slot
    // note: it's important to go highest-to-lowest since the status cache removes
    // older entries first
    // note: we already checked above that `bank_slot == slot_history.newest()`
    let slot_missing_from_deltas = (slot_history.oldest()..=slot_history.newest())
        .rev()
        .filter(|slot| slot_history.check(*slot) == Check::Found)
        .take(status_cache::MAX_CACHE_ENTRIES)
        .find(|slot| !slots_from_slot_deltas.contains(slot));
    if let Some(slot) = slot_missing_from_deltas {
        return Err(VerifySlotDeltasError::SlotNotFoundInDeltas(slot));
    }

    Ok(())
}
|
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
pub(crate) fn get_snapshot_file_name(slot: Slot) -> String {
|
2019-07-31 17:58:10 -07:00
|
|
|
slot.to_string()
|
|
|
|
}
|
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
pub(crate) fn get_bank_snapshots_dir<P: AsRef<Path>>(path: P, slot: Slot) -> PathBuf {
|
2019-07-31 17:58:10 -07:00
|
|
|
path.as_ref().join(slot.to_string())
|
|
|
|
}
|
|
|
|
|
2019-10-18 18:16:06 -07:00
|
|
|
fn get_io_error(error: &str) -> SnapshotError {
|
2019-08-14 23:14:40 -07:00
|
|
|
warn!("Snapshot Error: {:?}", error);
|
2021-02-18 23:42:09 -08:00
|
|
|
SnapshotError::Io(IoError::new(ErrorKind::Other, error))
|
2019-07-31 17:58:10 -07:00
|
|
|
}
|
|
|
|
|
2022-04-06 19:55:44 -07:00
|
|
|
#[derive(Debug, Copy, Clone)]
/// allow tests to specify what happened to the serialized format
/// (used by `verify_snapshot_archive` to decide how to compare snapshot files)
pub enum VerifyBank {
    /// the bank's serialized format is expected to be identical to what we are comparing against
    Deterministic,
    /// the serialized bank was 'reserialized' into a non-deterministic format at the specified slot
    /// so, deserialize both files and compare deserialized results
    NonDeterministic(Slot),
}
|
|
|
|
|
2020-01-23 10:20:37 -08:00
|
|
|
/// Test helper: unpack `snapshot_archive` into a temp dir and assert its contents match the
/// given snapshots and account-storage directories.
///
/// With `VerifyBank::NonDeterministic(slot)`, the serialized bank files for `slot` are compared
/// by deserializing both (byte-for-byte equality is not expected), then removed so the
/// directory diff below can succeed.
///
/// # Panics
/// Panics (via `unwrap`/`assert!`) on any I/O failure or mismatch — intended for tests.
pub fn verify_snapshot_archive<P, Q, R>(
    snapshot_archive: P,
    snapshots_to_verify: Q,
    storages_to_verify: R,
    archive_format: ArchiveFormat,
    verify_bank: VerifyBank,
) where
    P: AsRef<Path>,
    Q: AsRef<Path>,
    R: AsRef<Path>,
{
    let temp_dir = tempfile::TempDir::new().unwrap();
    let unpack_dir = temp_dir.path();
    untar_snapshot_in(
        snapshot_archive,
        unpack_dir,
        &[unpack_dir.to_path_buf()],
        archive_format,
        1,
    )
    .unwrap();

    // Check snapshots are the same
    let unpacked_snapshots = unpack_dir.join("snapshots");
    if let VerifyBank::NonDeterministic(slot) = verify_bank {
        // file contents may be different, but deserialized structs should be equal
        let slot = slot.to_string();
        let p1 = snapshots_to_verify.as_ref().join(&slot).join(&slot);
        let p2 = unpacked_snapshots.join(&slot).join(&slot);

        assert!(crate::serde_snapshot::compare_two_serialized_banks(&p1, &p2).unwrap());
        // Remove both bank files so the directory diff below ignores them.
        std::fs::remove_file(p1).unwrap();
        std::fs::remove_file(p2).unwrap();
    }

    assert!(!dir_diff::is_different(&snapshots_to_verify, unpacked_snapshots).unwrap());

    // Check the account entries are the same
    let unpacked_accounts = unpack_dir.join("accounts");
    assert!(!dir_diff::is_different(&storages_to_verify, unpacked_accounts).unwrap());
}
|
2020-01-09 16:49:36 -08:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Remove outdated bank snapshots
|
2022-04-06 19:39:26 -07:00
|
|
|
pub fn purge_old_bank_snapshots(bank_snapshots_dir: impl AsRef<Path>) {
|
|
|
|
let do_purge = |mut bank_snapshots: Vec<BankSnapshotInfo>| {
|
|
|
|
bank_snapshots.sort_unstable();
|
|
|
|
bank_snapshots
|
|
|
|
.into_iter()
|
|
|
|
.rev()
|
|
|
|
.skip(MAX_BANK_SNAPSHOTS_TO_RETAIN)
|
|
|
|
.for_each(|bank_snapshot| {
|
|
|
|
let r = remove_bank_snapshot(bank_snapshot.slot, &bank_snapshots_dir);
|
|
|
|
if r.is_err() {
|
|
|
|
warn!(
|
|
|
|
"Couldn't remove bank snapshot at: {}",
|
|
|
|
bank_snapshot.snapshot_path.display()
|
|
|
|
);
|
|
|
|
}
|
|
|
|
})
|
|
|
|
};
|
|
|
|
|
|
|
|
do_purge(get_bank_snapshots_pre(&bank_snapshots_dir));
|
|
|
|
do_purge(get_bank_snapshots_post(&bank_snapshots_dir));
|
2020-09-28 16:04:46 -07:00
|
|
|
}
|
|
|
|
|
2022-03-22 19:27:54 -07:00
|
|
|
/// Get the snapshot storages for this bank
|
2022-11-07 07:09:31 -08:00
|
|
|
pub fn get_snapshot_storages(bank: &Bank) -> SnapshotStorages {
|
2021-09-27 11:07:00 -07:00
|
|
|
let mut measure_snapshot_storages = Measure::start("snapshot-storages");
|
2022-03-22 19:27:54 -07:00
|
|
|
let snapshot_storages = bank.get_snapshot_storages(None);
|
2021-09-27 11:07:00 -07:00
|
|
|
measure_snapshot_storages.stop();
|
2022-03-22 19:27:54 -07:00
|
|
|
let snapshot_storages_count = snapshot_storages.iter().map(Vec::len).sum::<usize>();
|
|
|
|
datapoint_info!(
|
|
|
|
"get_snapshot_storages",
|
|
|
|
("snapshot-storages-count", snapshot_storages_count, i64),
|
|
|
|
(
|
|
|
|
"snapshot-storages-time-ms",
|
|
|
|
measure_snapshot_storages.as_ms(),
|
|
|
|
i64
|
|
|
|
),
|
|
|
|
);
|
2021-09-27 11:07:00 -07:00
|
|
|
|
|
|
|
snapshot_storages
|
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Convenience function to create a full snapshot archive out of any Bank, regardless of state.
/// The Bank will be frozen during the process.
/// This is only called from ledger-tool or tests. Warping is a special case as well.
///
/// Requires:
///     - `bank` is complete
pub fn bank_to_full_snapshot_archive(
    bank_snapshots_dir: impl AsRef<Path>,
    bank: &Bank,
    snapshot_version: Option<SnapshotVersion>,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    archive_format: ArchiveFormat,
    maximum_full_snapshot_archives_to_retain: usize,
    maximum_incremental_snapshot_archives_to_retain: usize,
) -> Result<FullSnapshotArchiveInfo> {
    let snapshot_version = snapshot_version.unwrap_or_default();

    assert!(bank.is_complete());
    // NOTE: the ordering of the following calls matters; the bank must be
    // rooted, flushed, cleaned, and hashed before its storages are captured
    bank.squash(); // Bank may not be a root
    bank.force_flush_accounts_cache();
    bank.clean_accounts(Some(bank.slot()));
    bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false, false);
    bank.rehash(); // Bank accounts may have been manually modified by the caller

    // bank snapshot files go into a temp dir inside bank_snapshots_dir; they are
    // only needed until the archive is produced
    let temp_dir = tempfile::tempdir_in(bank_snapshots_dir)?;
    let snapshot_storages = bank.get_snapshot_storages(None);
    let bank_snapshot_info =
        add_bank_snapshot(&temp_dir, bank, &snapshot_storages, snapshot_version)?;

    package_and_archive_full_snapshot(
        bank,
        &bank_snapshot_info,
        &temp_dir,
        full_snapshot_archives_dir,
        incremental_snapshot_archives_dir,
        snapshot_storages,
        archive_format,
        snapshot_version,
        maximum_full_snapshot_archives_to_retain,
        maximum_incremental_snapshot_archives_to_retain,
    )
}
|
|
|
|
|
|
|
|
/// Convenience function to create an incremental snapshot archive out of any Bank, regardless of
/// state. The Bank will be frozen during the process.
/// This is only called from ledger-tool or tests. Warping is a special case as well.
///
/// Requires:
///     - `bank` is complete
///     - `bank`'s slot is greater than `full_snapshot_slot`
pub fn bank_to_incremental_snapshot_archive(
    bank_snapshots_dir: impl AsRef<Path>,
    bank: &Bank,
    full_snapshot_slot: Slot,
    snapshot_version: Option<SnapshotVersion>,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    archive_format: ArchiveFormat,
    maximum_full_snapshot_archives_to_retain: usize,
    maximum_incremental_snapshot_archives_to_retain: usize,
) -> Result<IncrementalSnapshotArchiveInfo> {
    let snapshot_version = snapshot_version.unwrap_or_default();

    assert!(bank.is_complete());
    assert!(bank.slot() > full_snapshot_slot);
    // NOTE: the ordering of the following calls matters; the bank must be
    // rooted, flushed, cleaned, and hashed before its storages are captured.
    // Cleaning stops at full_snapshot_slot so the base slot's state is preserved.
    bank.squash(); // Bank may not be a root
    bank.force_flush_accounts_cache();
    bank.clean_accounts(Some(full_snapshot_slot));
    bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false, false);
    bank.rehash(); // Bank accounts may have been manually modified by the caller

    // bank snapshot files go into a temp dir inside bank_snapshots_dir; they are
    // only needed until the archive is produced
    let temp_dir = tempfile::tempdir_in(bank_snapshots_dir)?;
    // only storages newer than full_snapshot_slot are part of an incremental snapshot
    let snapshot_storages = bank.get_snapshot_storages(Some(full_snapshot_slot));
    let bank_snapshot_info =
        add_bank_snapshot(&temp_dir, bank, &snapshot_storages, snapshot_version)?;

    package_and_archive_incremental_snapshot(
        bank,
        full_snapshot_slot,
        &bank_snapshot_info,
        &temp_dir,
        full_snapshot_archives_dir,
        incremental_snapshot_archives_dir,
        snapshot_storages,
        archive_format,
        snapshot_version,
        maximum_full_snapshot_archives_to_retain,
        maximum_incremental_snapshot_archives_to_retain,
    )
}
|
|
|
|
|
|
|
|
/// Helper function to hold shared code to package, process, and archive full snapshots
///
/// Builds an `AccountsPackage` for a full snapshot, reserializes the bank file
/// with the accounts hash, then archives the result and prunes old archives
/// per the retention limits.
#[allow(clippy::too_many_arguments)]
pub fn package_and_archive_full_snapshot(
    bank: &Bank,
    bank_snapshot_info: &BankSnapshotInfo,
    bank_snapshots_dir: impl AsRef<Path>,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    snapshot_storages: SnapshotStorages,
    archive_format: ArchiveFormat,
    snapshot_version: SnapshotVersion,
    maximum_full_snapshot_archives_to_retain: usize,
    maximum_incremental_snapshot_archives_to_retain: usize,
) -> Result<FullSnapshotArchiveInfo> {
    let slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas();
    let accounts_package = AccountsPackage::new_for_snapshot(
        AccountsPackageType::Snapshot(SnapshotType::FullSnapshot),
        bank,
        bank_snapshot_info,
        bank_snapshots_dir,
        slot_deltas,
        &full_snapshot_archives_dir,
        &incremental_snapshot_archives_dir,
        snapshot_storages,
        archive_format,
        snapshot_version,
        None,
    )?;

    // rewrite the bank file inside the package with the accounts hash before archiving
    crate::serde_snapshot::reserialize_bank_with_new_accounts_hash(
        accounts_package.snapshot_links_dir(),
        accounts_package.slot,
        &bank.get_accounts_hash(),
        None,
    );

    let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash());
    archive_snapshot_package(
        &snapshot_package,
        full_snapshot_archives_dir,
        incremental_snapshot_archives_dir,
        maximum_full_snapshot_archives_to_retain,
        maximum_incremental_snapshot_archives_to_retain,
    )?;

    Ok(FullSnapshotArchiveInfo::new(
        snapshot_package.snapshot_archive_info,
    ))
}
|
|
|
|
|
|
|
|
/// Helper function to hold shared code to package, process, and archive incremental snapshots
///
/// Same flow as `package_and_archive_full_snapshot()`, but the package is tagged
/// as an incremental snapshot based on `incremental_snapshot_base_slot`.
#[allow(clippy::too_many_arguments)]
pub fn package_and_archive_incremental_snapshot(
    bank: &Bank,
    incremental_snapshot_base_slot: Slot,
    bank_snapshot_info: &BankSnapshotInfo,
    bank_snapshots_dir: impl AsRef<Path>,
    full_snapshot_archives_dir: impl AsRef<Path>,
    incremental_snapshot_archives_dir: impl AsRef<Path>,
    snapshot_storages: SnapshotStorages,
    archive_format: ArchiveFormat,
    snapshot_version: SnapshotVersion,
    maximum_full_snapshot_archives_to_retain: usize,
    maximum_incremental_snapshot_archives_to_retain: usize,
) -> Result<IncrementalSnapshotArchiveInfo> {
    let slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas();
    let accounts_package = AccountsPackage::new_for_snapshot(
        AccountsPackageType::Snapshot(SnapshotType::IncrementalSnapshot(
            incremental_snapshot_base_slot,
        )),
        bank,
        bank_snapshot_info,
        bank_snapshots_dir,
        slot_deltas,
        &full_snapshot_archives_dir,
        &incremental_snapshot_archives_dir,
        snapshot_storages,
        archive_format,
        snapshot_version,
        None,
    )?;

    // rewrite the bank file inside the package with the accounts hash before archiving
    crate::serde_snapshot::reserialize_bank_with_new_accounts_hash(
        accounts_package.snapshot_links_dir(),
        accounts_package.slot,
        &bank.get_accounts_hash(),
        None,
    );

    let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash());
    archive_snapshot_package(
        &snapshot_package,
        full_snapshot_archives_dir,
        incremental_snapshot_archives_dir,
        maximum_full_snapshot_archives_to_retain,
        maximum_incremental_snapshot_archives_to_retain,
    )?;

    Ok(IncrementalSnapshotArchiveInfo::new(
        incremental_snapshot_base_slot,
        snapshot_package.snapshot_archive_info,
    ))
}
|
|
|
|
|
2021-09-07 16:26:35 -07:00
|
|
|
pub fn should_take_full_snapshot(
|
|
|
|
block_height: Slot,
|
|
|
|
full_snapshot_archive_interval_slots: Slot,
|
|
|
|
) -> bool {
|
|
|
|
block_height % full_snapshot_archive_interval_slots == 0
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn should_take_incremental_snapshot(
|
|
|
|
block_height: Slot,
|
|
|
|
incremental_snapshot_archive_interval_slots: Slot,
|
|
|
|
last_full_snapshot_slot: Option<Slot>,
|
|
|
|
) -> bool {
|
|
|
|
block_height % incremental_snapshot_archive_interval_slots == 0
|
|
|
|
&& last_full_snapshot_slot.is_some()
|
|
|
|
}
|
|
|
|
|
2020-01-09 16:49:36 -08:00
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
2021-12-03 09:00:31 -08:00
|
|
|
use {
|
|
|
|
super::*,
|
2022-10-13 09:47:36 -07:00
|
|
|
crate::{accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, status_cache::Status},
|
2021-12-03 09:00:31 -08:00
|
|
|
assert_matches::assert_matches,
|
|
|
|
bincode::{deserialize_from, serialize_into},
|
|
|
|
solana_sdk::{
|
|
|
|
genesis_config::create_genesis_config,
|
2022-06-16 14:35:25 -07:00
|
|
|
native_token::sol_to_lamports,
|
2021-12-03 09:00:31 -08:00
|
|
|
signature::{Keypair, Signer},
|
2022-08-18 06:48:58 -07:00
|
|
|
slot_history::SlotHistory,
|
2021-12-03 09:00:31 -08:00
|
|
|
system_transaction,
|
|
|
|
transaction::SanitizedTransaction,
|
|
|
|
},
|
2021-12-17 10:27:54 -08:00
|
|
|
std::{convert::TryFrom, mem::size_of},
|
|
|
|
tempfile::NamedTempFile,
|
2021-07-01 10:20:56 -07:00
|
|
|
};
|
2020-01-09 16:49:36 -08:00
|
|
|
|
|
|
|
    /// Serializing exactly `maximum_size` bytes must succeed and report the
    /// full size as consumed.
    #[test]
    fn test_serialize_snapshot_data_file_under_limit() {
        let temp_dir = tempfile::TempDir::new().unwrap();
        let expected_consumed_size = size_of::<u32>() as u64;
        let consumed_size = serialize_snapshot_data_file_capped(
            &temp_dir.path().join("data-file"),
            expected_consumed_size,
            |stream| {
                serialize_into(stream, &2323_u32)?;
                Ok(())
            },
        )
        .unwrap();
        assert_eq!(consumed_size, expected_consumed_size);
    }
|
|
|
|
|
|
|
|
    /// Serializing one byte more than `maximum_size` must fail with the
    /// "too large snapshot data file to serialize" I/O error.
    #[test]
    fn test_serialize_snapshot_data_file_over_limit() {
        let temp_dir = tempfile::TempDir::new().unwrap();
        let expected_consumed_size = size_of::<u32>() as u64;
        let result = serialize_snapshot_data_file_capped(
            &temp_dir.path().join("data-file"),
            // cap is one byte short of what the closure writes
            expected_consumed_size - 1,
            |stream| {
                serialize_into(stream, &2323_u32)?;
                Ok(())
            },
        );
        assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize"));
    }
|
|
|
|
|
|
|
|
    /// Round-trip: a value serialized within the cap must deserialize back to
    /// the same value when the deserialize cap matches.
    #[test]
    fn test_deserialize_snapshot_data_file_under_limit() {
        let expected_data = 2323_u32;
        let expected_consumed_size = size_of::<u32>() as u64;

        let temp_dir = tempfile::TempDir::new().unwrap();
        serialize_snapshot_data_file_capped(
            &temp_dir.path().join("data-file"),
            expected_consumed_size,
            |stream| {
                serialize_into(stream, &expected_data)?;
                Ok(())
            },
        )
        .unwrap();

        // only a full snapshot stream; no incremental component
        let snapshot_root_paths = SnapshotRootPaths {
            full_snapshot_root_file_path: temp_dir.path().join("data-file"),
            incremental_snapshot_root_file_path: None,
        };

        let actual_data = deserialize_snapshot_data_files_capped(
            &snapshot_root_paths,
            expected_consumed_size,
            |stream| {
                Ok(deserialize_from::<_, u32>(
                    &mut stream.full_snapshot_stream,
                )?)
            },
        )
        .unwrap();
        assert_eq!(actual_data, expected_data);
    }
|
|
|
|
|
|
|
|
    /// Deserializing a file larger than the deserialize cap must fail with the
    /// "too large snapshot data file to deserialize" I/O error.
    #[test]
    fn test_deserialize_snapshot_data_file_over_limit() {
        let expected_data = 2323_u32;
        let expected_consumed_size = size_of::<u32>() as u64;

        let temp_dir = tempfile::TempDir::new().unwrap();
        serialize_snapshot_data_file_capped(
            &temp_dir.path().join("data-file"),
            expected_consumed_size,
            |stream| {
                serialize_into(stream, &expected_data)?;
                Ok(())
            },
        )
        .unwrap();

        let snapshot_root_paths = SnapshotRootPaths {
            full_snapshot_root_file_path: temp_dir.path().join("data-file"),
            incremental_snapshot_root_file_path: None,
        };

        let result = deserialize_snapshot_data_files_capped(
            &snapshot_root_paths,
            // cap is one byte short of the file's actual size
            expected_consumed_size - 1,
            |stream| {
                Ok(deserialize_from::<_, u32>(
                    &mut stream.full_snapshot_stream,
                )?)
            },
        );
        assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize"));
    }
|
|
|
|
|
|
|
|
    /// A file with trailing unread bytes (two values written, one read) must be
    /// rejected with the "invalid snapshot data file" I/O error.
    #[test]
    fn test_deserialize_snapshot_data_file_extra_data() {
        let expected_data = 2323_u32;
        let expected_consumed_size = size_of::<u32>() as u64;

        let temp_dir = tempfile::TempDir::new().unwrap();
        serialize_snapshot_data_file_capped(
            &temp_dir.path().join("data-file"),
            expected_consumed_size * 2,
            |stream| {
                // write the value twice so the file contains extra data
                serialize_into(stream.by_ref(), &expected_data)?;
                serialize_into(stream.by_ref(), &expected_data)?;
                Ok(())
            },
        )
        .unwrap();

        let snapshot_root_paths = SnapshotRootPaths {
            full_snapshot_root_file_path: temp_dir.path().join("data-file"),
            incremental_snapshot_root_file_path: None,
        };

        let result = deserialize_snapshot_data_files_capped(
            &snapshot_root_paths,
            expected_consumed_size * 2,
            |stream| {
                // deserialize only one of the two written values
                Ok(deserialize_from::<_, u32>(
                    &mut stream.full_snapshot_stream,
                )?)
            },
        );
        assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("invalid snapshot data file"));
    }
|
2020-02-24 12:37:14 -08:00
|
|
|
|
2021-12-17 10:27:54 -08:00
|
|
|
#[test]
|
|
|
|
fn test_snapshot_version_from_file_under_limit() {
|
2022-01-13 10:19:15 -08:00
|
|
|
let file_content = SnapshotVersion::default().as_str();
|
2021-12-17 10:27:54 -08:00
|
|
|
let mut file = NamedTempFile::new().unwrap();
|
|
|
|
file.write_all(file_content.as_bytes()).unwrap();
|
|
|
|
let version_from_file = snapshot_version_from_file(file.path()).unwrap();
|
|
|
|
assert_eq!(version_from_file, file_content);
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// A version file larger than MAX_SNAPSHOT_VERSION_FILE_SIZE must be
    /// rejected with the "snapshot version file too large" I/O error.
    #[test]
    fn test_snapshot_version_from_file_over_limit() {
        let over_limit_size = usize::try_from(MAX_SNAPSHOT_VERSION_FILE_SIZE + 1).unwrap();
        let file_content = vec![7u8; over_limit_size];
        let mut file = NamedTempFile::new().unwrap();
        file.write_all(&file_content).unwrap();
        assert_matches!(
            snapshot_version_from_file(file.path()),
            Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("snapshot version file too large")
        );
    }
|
|
|
|
|
2020-02-24 12:37:14 -08:00
|
|
|
    /// Valid `snapshot-<slot>-<hash>.<ext>` names must parse into
    /// (slot, hash, format) for every supported archive extension, and names
    /// with a bad slot, bad hash, or bad extension must all be rejected.
    #[test]
    fn test_parse_full_snapshot_archive_filename() {
        assert_eq!(
            parse_full_snapshot_archive_filename(&format!(
                "snapshot-42-{}.tar.bz2",
                Hash::default()
            ))
            .unwrap(),
            (42, SnapshotHash(Hash::default()), ArchiveFormat::TarBzip2)
        );
        assert_eq!(
            parse_full_snapshot_archive_filename(&format!(
                "snapshot-43-{}.tar.zst",
                Hash::default()
            ))
            .unwrap(),
            (43, SnapshotHash(Hash::default()), ArchiveFormat::TarZstd)
        );
        assert_eq!(
            parse_full_snapshot_archive_filename(&format!("snapshot-44-{}.tar", Hash::default()))
                .unwrap(),
            (44, SnapshotHash(Hash::default()), ArchiveFormat::Tar)
        );
        assert_eq!(
            parse_full_snapshot_archive_filename(&format!(
                "snapshot-45-{}.tar.lz4",
                Hash::default()
            ))
            .unwrap(),
            (45, SnapshotHash(Hash::default()), ArchiveFormat::TarLz4)
        );

        // every combination of invalid slot / hash / extension must fail
        assert!(parse_full_snapshot_archive_filename("invalid").is_err());
        assert!(
            parse_full_snapshot_archive_filename("snapshot-bad!slot-bad!hash.bad!ext").is_err()
        );

        assert!(
            parse_full_snapshot_archive_filename("snapshot-12345678-bad!hash.bad!ext").is_err()
        );
        assert!(parse_full_snapshot_archive_filename(&format!(
            "snapshot-12345678-{}.bad!ext",
            Hash::new_unique()
        ))
        .is_err());
        assert!(parse_full_snapshot_archive_filename("snapshot-12345678-bad!hash.tar").is_err());

        assert!(parse_full_snapshot_archive_filename(&format!(
            "snapshot-bad!slot-{}.bad!ext",
            Hash::new_unique()
        ))
        .is_err());
        assert!(parse_full_snapshot_archive_filename(&format!(
            "snapshot-12345678-{}.bad!ext",
            Hash::new_unique()
        ))
        .is_err());
        assert!(parse_full_snapshot_archive_filename(&format!(
            "snapshot-bad!slot-{}.tar",
            Hash::new_unique()
        ))
        .is_err());

        assert!(parse_full_snapshot_archive_filename("snapshot-bad!slot-bad!hash.tar").is_err());
        assert!(parse_full_snapshot_archive_filename("snapshot-12345678-bad!hash.tar").is_err());
        assert!(parse_full_snapshot_archive_filename(&format!(
            "snapshot-bad!slot-{}.tar",
            Hash::new_unique()
        ))
        .is_err());
    }
|
|
|
|
|
|
|
|
    /// Valid `incremental-snapshot-<base>-<slot>-<hash>.<ext>` names must parse
    /// into (base slot, slot, hash, format) for every supported extension; full
    /// snapshot names and names with any bad component must be rejected.
    #[test]
    fn test_parse_incremental_snapshot_archive_filename() {
        solana_logger::setup();
        assert_eq!(
            parse_incremental_snapshot_archive_filename(&format!(
                "incremental-snapshot-42-123-{}.tar.bz2",
                Hash::default()
            ))
            .unwrap(),
            (
                42,
                123,
                SnapshotHash(Hash::default()),
                ArchiveFormat::TarBzip2
            )
        );
        assert_eq!(
            parse_incremental_snapshot_archive_filename(&format!(
                "incremental-snapshot-43-234-{}.tar.zst",
                Hash::default()
            ))
            .unwrap(),
            (
                43,
                234,
                SnapshotHash(Hash::default()),
                ArchiveFormat::TarZstd
            )
        );
        assert_eq!(
            parse_incremental_snapshot_archive_filename(&format!(
                "incremental-snapshot-44-345-{}.tar",
                Hash::default()
            ))
            .unwrap(),
            (44, 345, SnapshotHash(Hash::default()), ArchiveFormat::Tar)
        );
        assert_eq!(
            parse_incremental_snapshot_archive_filename(&format!(
                "incremental-snapshot-45-456-{}.tar.lz4",
                Hash::default()
            ))
            .unwrap(),
            (
                45,
                456,
                SnapshotHash(Hash::default()),
                ArchiveFormat::TarLz4
            )
        );

        assert!(parse_incremental_snapshot_archive_filename("invalid").is_err());
        // a full snapshot filename is not an incremental snapshot filename
        assert!(parse_incremental_snapshot_archive_filename(&format!(
            "snapshot-42-{}.tar",
            Hash::new_unique()
        ))
        .is_err());
        assert!(parse_incremental_snapshot_archive_filename(
            "incremental-snapshot-bad!slot-bad!slot-bad!hash.bad!ext"
        )
        .is_err());

        assert!(parse_incremental_snapshot_archive_filename(&format!(
            "incremental-snapshot-bad!slot-56785678-{}.tar",
            Hash::new_unique()
        ))
        .is_err());

        assert!(parse_incremental_snapshot_archive_filename(&format!(
            "incremental-snapshot-12345678-bad!slot-{}.tar",
            Hash::new_unique()
        ))
        .is_err());

        assert!(parse_incremental_snapshot_archive_filename(
            "incremental-snapshot-12341234-56785678-bad!HASH.tar"
        )
        .is_err());

        assert!(parse_incremental_snapshot_archive_filename(&format!(
            "incremental-snapshot-12341234-56785678-{}.bad!ext",
            Hash::new_unique()
        ))
        .is_err());
    }
|
|
|
|
|
|
|
|
    /// An incremental snapshot is compatible with a full snapshot only when its
    /// base slot equals the full snapshot's slot; `None` (no incremental) is
    /// always compatible.
    #[test]
    fn test_check_are_snapshots_compatible() {
        solana_logger::setup();
        let slot1: Slot = 1234;
        let slot2: Slot = 5678;
        let slot3: Slot = 999_999;

        let full_snapshot_archive_info = FullSnapshotArchiveInfo::new_from_path(PathBuf::from(
            format!("/dir/snapshot-{}-{}.tar", slot1, Hash::new_unique()),
        ))
        .unwrap();

        // no incremental snapshot at all is always compatible
        assert!(check_are_snapshots_compatible(&full_snapshot_archive_info, None,).is_ok());

        // incremental based on slot1 matches the full snapshot at slot1
        let incremental_snapshot_archive_info =
            IncrementalSnapshotArchiveInfo::new_from_path(PathBuf::from(format!(
                "/dir/incremental-snapshot-{}-{}-{}.tar",
                slot1,
                slot2,
                Hash::new_unique()
            )))
            .unwrap();

        assert!(check_are_snapshots_compatible(
            &full_snapshot_archive_info,
            Some(&incremental_snapshot_archive_info)
        )
        .is_ok());

        // incremental based on slot2 does NOT match the full snapshot at slot1
        let incremental_snapshot_archive_info =
            IncrementalSnapshotArchiveInfo::new_from_path(PathBuf::from(format!(
                "/dir/incremental-snapshot-{}-{}-{}.tar",
                slot2,
                slot3,
                Hash::new_unique()
            )))
            .unwrap();

        assert!(check_are_snapshots_compatible(
            &full_snapshot_archive_info,
            Some(&incremental_snapshot_archive_info)
        )
        .is_err());
    }
|
|
|
|
|
|
|
|
    /// A test helper function that creates bank snapshot files
|
2021-08-21 13:41:03 -07:00
|
|
|
fn common_create_bank_snapshot_files(
|
|
|
|
bank_snapshots_dir: &Path,
|
|
|
|
min_slot: Slot,
|
|
|
|
max_slot: Slot,
|
|
|
|
) {
|
2021-07-22 12:40:37 -07:00
|
|
|
for slot in min_slot..max_slot {
|
2021-08-21 13:41:03 -07:00
|
|
|
let snapshot_dir = get_bank_snapshots_dir(bank_snapshots_dir, slot);
|
2021-07-22 12:40:37 -07:00
|
|
|
fs::create_dir_all(&snapshot_dir).unwrap();
|
|
|
|
|
|
|
|
let snapshot_filename = get_snapshot_file_name(slot);
|
|
|
|
let snapshot_path = snapshot_dir.join(snapshot_filename);
|
|
|
|
File::create(snapshot_path).unwrap();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2022-04-06 19:39:26 -07:00
|
|
|
fn test_get_bank_snapshots() {
|
2021-07-22 12:40:37 -07:00
|
|
|
solana_logger::setup();
|
|
|
|
let temp_snapshots_dir = tempfile::TempDir::new().unwrap();
|
|
|
|
let min_slot = 10;
|
|
|
|
let max_slot = 20;
|
|
|
|
common_create_bank_snapshot_files(temp_snapshots_dir.path(), min_slot, max_slot);
|
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
let bank_snapshots = get_bank_snapshots(temp_snapshots_dir.path());
|
|
|
|
assert_eq!(bank_snapshots.len() as Slot, max_slot - min_slot);
|
2021-07-22 12:40:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2022-04-06 19:39:26 -07:00
|
|
|
fn test_get_highest_bank_snapshot_post() {
|
2021-07-22 12:40:37 -07:00
|
|
|
solana_logger::setup();
|
|
|
|
let temp_snapshots_dir = tempfile::TempDir::new().unwrap();
|
|
|
|
let min_slot = 99;
|
|
|
|
let max_slot = 123;
|
|
|
|
common_create_bank_snapshot_files(temp_snapshots_dir.path(), min_slot, max_slot);
|
|
|
|
|
2022-04-06 19:39:26 -07:00
|
|
|
let highest_bank_snapshot = get_highest_bank_snapshot_post(temp_snapshots_dir.path());
|
|
|
|
assert!(highest_bank_snapshot.is_some());
|
|
|
|
assert_eq!(highest_bank_snapshot.unwrap().slot, max_slot - 1);
|
2021-05-27 10:00:27 -07:00
|
|
|
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// A test helper function that creates full and incremental snapshot archive files. Creates
|
|
|
|
/// full snapshot files in the range (`min_full_snapshot_slot`, `max_full_snapshot_slot`], and
|
|
|
|
/// incremental snapshot files in the range (`min_incremental_snapshot_slot`,
|
|
|
|
/// `max_incremental_snapshot_slot`]. Additionally, "bad" files are created for both full and
|
|
|
|
/// incremental snapshots to ensure the tests properly filter them out.
|
2021-07-01 10:20:56 -07:00
|
|
|
fn common_create_snapshot_archive_files(
|
2022-05-10 13:37:41 -07:00
|
|
|
full_snapshot_archives_dir: &Path,
|
|
|
|
incremental_snapshot_archives_dir: &Path,
|
2021-07-22 12:40:37 -07:00
|
|
|
min_full_snapshot_slot: Slot,
|
|
|
|
max_full_snapshot_slot: Slot,
|
|
|
|
min_incremental_snapshot_slot: Slot,
|
|
|
|
max_incremental_snapshot_slot: Slot,
|
2021-07-01 10:20:56 -07:00
|
|
|
) {
|
2022-05-10 13:37:41 -07:00
|
|
|
fs::create_dir_all(full_snapshot_archives_dir).unwrap();
|
|
|
|
fs::create_dir_all(incremental_snapshot_archives_dir).unwrap();
|
2021-07-22 12:40:37 -07:00
|
|
|
for full_snapshot_slot in min_full_snapshot_slot..max_full_snapshot_slot {
|
|
|
|
for incremental_snapshot_slot in
|
|
|
|
min_incremental_snapshot_slot..max_incremental_snapshot_slot
|
|
|
|
{
|
|
|
|
let snapshot_filename = format!(
|
|
|
|
"incremental-snapshot-{}-{}-{}.tar",
|
|
|
|
full_snapshot_slot,
|
|
|
|
incremental_snapshot_slot,
|
|
|
|
Hash::default()
|
|
|
|
);
|
2022-05-10 13:37:41 -07:00
|
|
|
let snapshot_filepath = incremental_snapshot_archives_dir.join(snapshot_filename);
|
2021-07-22 12:40:37 -07:00
|
|
|
File::create(snapshot_filepath).unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
let snapshot_filename =
|
|
|
|
format!("snapshot-{}-{}.tar", full_snapshot_slot, Hash::default());
|
2022-05-10 13:37:41 -07:00
|
|
|
let snapshot_filepath = full_snapshot_archives_dir.join(snapshot_filename);
|
2021-05-27 10:00:27 -07:00
|
|
|
File::create(snapshot_filepath).unwrap();
|
2021-07-22 12:40:37 -07:00
|
|
|
|
|
|
|
// Add in an incremental snapshot with a bad filename and high slot to ensure filename are filtered and sorted correctly
|
|
|
|
let bad_filename = format!(
|
|
|
|
"incremental-snapshot-{}-{}-bad!hash.tar",
|
|
|
|
full_snapshot_slot,
|
|
|
|
max_incremental_snapshot_slot + 1,
|
|
|
|
);
|
2022-05-10 13:37:41 -07:00
|
|
|
let bad_filepath = incremental_snapshot_archives_dir.join(bad_filename);
|
2021-07-22 12:40:37 -07:00
|
|
|
File::create(bad_filepath).unwrap();
|
2021-05-27 10:00:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add in a snapshot with a bad filename and high slot to ensure filename are filtered and
|
|
|
|
// sorted correctly
|
2021-07-22 12:40:37 -07:00
|
|
|
let bad_filename = format!("snapshot-{}-bad!hash.tar", max_full_snapshot_slot + 1);
|
2022-05-10 13:37:41 -07:00
|
|
|
let bad_filepath = full_snapshot_archives_dir.join(bad_filename);
|
2021-05-27 10:00:27 -07:00
|
|
|
File::create(bad_filepath).unwrap();
|
2021-07-01 10:20:56 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn test_get_full_snapshot_archives() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let (min_slot, max_slot) = (123, 456);
    common_create_snapshot_archive_files(
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        min_slot,
        max_slot,
        0,
        0,
    );

    // One archive per slot in [min_slot, max_slot); the "bad" filename planted by the
    // helper must have been filtered out.
    let snapshot_archives = get_full_snapshot_archives(full_snapshot_archives_dir);
    assert_eq!(snapshot_archives.len() as Slot, max_slot - min_slot);
}
|
|
|
|
|
2022-03-14 12:03:59 -07:00
|
|
|
#[test]
fn test_get_full_snapshot_archives_remote() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let (min_slot, max_slot) = (123, 456);
    // Archives are placed under a "remote" subdirectory of each archive dir.
    common_create_snapshot_archive_files(
        &full_snapshot_archives_dir.path().join("remote"),
        &incremental_snapshot_archives_dir.path().join("remote"),
        min_slot,
        max_slot,
        0,
        0,
    );

    // All archives should be found, and every one should be flagged as remote.
    let snapshot_archives = get_full_snapshot_archives(full_snapshot_archives_dir);
    assert_eq!(snapshot_archives.len() as Slot, max_slot - min_slot);
    assert!(snapshot_archives.iter().all(|info| info.is_remote()));
}
|
|
|
|
|
2021-07-01 10:20:56 -07:00
|
|
|
#[test]
fn test_get_incremental_snapshot_archives() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let (min_full_snapshot_slot, max_full_snapshot_slot) = (12, 23);
    let (min_incremental_snapshot_slot, max_incremental_snapshot_slot) = (34, 45);
    common_create_snapshot_archive_files(
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        min_full_snapshot_slot,
        max_full_snapshot_slot,
        min_incremental_snapshot_slot,
        max_incremental_snapshot_slot,
    );

    // The helper creates one incremental archive per (full slot, incremental slot) pair.
    let incremental_snapshot_archives =
        get_incremental_snapshot_archives(incremental_snapshot_archives_dir);
    assert_eq!(
        incremental_snapshot_archives.len() as Slot,
        (max_full_snapshot_slot - min_full_snapshot_slot)
            * (max_incremental_snapshot_slot - min_incremental_snapshot_slot)
    );
}
|
|
|
|
|
2022-03-14 12:03:59 -07:00
|
|
|
#[test]
fn test_get_incremental_snapshot_archives_remote() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let (min_full_snapshot_slot, max_full_snapshot_slot) = (12, 23);
    let (min_incremental_snapshot_slot, max_incremental_snapshot_slot) = (34, 45);
    // Archives are placed under a "remote" subdirectory of each archive dir.
    common_create_snapshot_archive_files(
        &full_snapshot_archives_dir.path().join("remote"),
        &incremental_snapshot_archives_dir.path().join("remote"),
        min_full_snapshot_slot,
        max_full_snapshot_slot,
        min_incremental_snapshot_slot,
        max_incremental_snapshot_slot,
    );

    // All (full slot, incremental slot) pairs should be found, each flagged as remote.
    let incremental_snapshot_archives =
        get_incremental_snapshot_archives(incremental_snapshot_archives_dir);
    assert_eq!(
        incremental_snapshot_archives.len() as Slot,
        (max_full_snapshot_slot - min_full_snapshot_slot)
            * (max_incremental_snapshot_slot - min_incremental_snapshot_slot)
    );
    assert!(incremental_snapshot_archives
        .iter()
        .all(|info| info.is_remote()));
}
|
|
|
|
|
2021-07-01 10:20:56 -07:00
|
|
|
#[test]
fn test_get_highest_full_snapshot_archive_slot() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let (min_slot, max_slot) = (123, 456);
    common_create_snapshot_archive_files(
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        min_slot,
        max_slot,
        0,
        0,
    );

    // Slots come from a half-open range, so the newest archive is at `max_slot - 1`.
    assert_eq!(
        get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir.path()),
        Some(max_slot - 1)
    );
}
|
2021-05-12 10:32:27 -07:00
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
#[test]
fn test_get_highest_incremental_snapshot_slot() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let (min_full_snapshot_slot, max_full_snapshot_slot) = (12, 23);
    let (min_incremental_snapshot_slot, max_incremental_snapshot_slot) = (34, 45);
    common_create_snapshot_archive_files(
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        min_full_snapshot_slot,
        max_full_snapshot_slot,
        min_incremental_snapshot_slot,
        max_incremental_snapshot_slot,
    );

    // Every full snapshot slot in range has incremental archives; the newest one is at
    // `max_incremental_snapshot_slot - 1` (half-open range).
    for full_snapshot_slot in min_full_snapshot_slot..max_full_snapshot_slot {
        assert_eq!(
            get_highest_incremental_snapshot_archive_slot(
                incremental_snapshot_archives_dir.path(),
                full_snapshot_slot
            ),
            Some(max_incremental_snapshot_slot - 1)
        );
    }

    // No incremental archives were created based on `max_full_snapshot_slot` itself.
    assert_eq!(
        get_highest_incremental_snapshot_archive_slot(
            incremental_snapshot_archives_dir.path(),
            max_full_snapshot_slot
        ),
        None
    );
}
|
|
|
|
|
2021-05-12 10:32:27 -07:00
|
|
|
fn common_test_purge_old_snapshot_archives(
|
|
|
|
snapshot_names: &[&String],
|
2021-09-04 05:37:29 -07:00
|
|
|
maximum_full_snapshot_archives_to_retain: usize,
|
|
|
|
maximum_incremental_snapshot_archives_to_retain: usize,
|
2021-05-12 10:32:27 -07:00
|
|
|
expected_snapshots: &[&String],
|
|
|
|
) {
|
|
|
|
let temp_snap_dir = tempfile::TempDir::new().unwrap();
|
|
|
|
|
|
|
|
for snap_name in snapshot_names {
|
2022-09-22 15:23:03 -07:00
|
|
|
let snap_path = temp_snap_dir.path().join(snap_name);
|
2021-05-12 10:32:27 -07:00
|
|
|
let mut _snap_file = File::create(snap_path);
|
|
|
|
}
|
2021-09-04 05:37:29 -07:00
|
|
|
purge_old_snapshot_archives(
|
2022-05-10 13:37:41 -07:00
|
|
|
temp_snap_dir.path(),
|
2021-09-04 05:37:29 -07:00
|
|
|
temp_snap_dir.path(),
|
|
|
|
maximum_full_snapshot_archives_to_retain,
|
|
|
|
maximum_incremental_snapshot_archives_to_retain,
|
|
|
|
);
|
2021-05-12 10:32:27 -07:00
|
|
|
|
|
|
|
let mut retained_snaps = HashSet::new();
|
|
|
|
for entry in fs::read_dir(temp_snap_dir.path()).unwrap() {
|
|
|
|
let entry_path_buf = entry.unwrap().path();
|
|
|
|
let entry_path = entry_path_buf.as_path();
|
|
|
|
let snapshot_name = entry_path
|
|
|
|
.file_name()
|
|
|
|
.unwrap()
|
|
|
|
.to_str()
|
|
|
|
.unwrap()
|
|
|
|
.to_string();
|
|
|
|
retained_snaps.insert(snapshot_name);
|
|
|
|
}
|
|
|
|
|
|
|
|
for snap_name in expected_snapshots {
|
2022-02-20 07:06:23 -08:00
|
|
|
assert!(
|
|
|
|
retained_snaps.contains(snap_name.as_str()),
|
|
|
|
"{} not found",
|
|
|
|
snap_name
|
|
|
|
);
|
2021-05-12 10:32:27 -07:00
|
|
|
}
|
2022-04-27 09:40:03 -07:00
|
|
|
assert_eq!(retained_snaps.len(), expected_snapshots.len());
|
2021-05-12 10:32:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn test_purge_old_full_snapshot_archives() {
    let snap1_name = format!("snapshot-1-{}.tar.zst", Hash::default());
    let snap2_name = format!("snapshot-3-{}.tar.zst", Hash::default());
    let snap3_name = format!("snapshot-50-{}.tar.zst", Hash::default());
    let snapshot_names = vec![&snap1_name, &snap2_name, &snap3_name];

    // Table of (max full archives to retain, archives expected to survive the purge).
    let cases: Vec<(usize, Vec<&String>)> = vec![
        // retaining 1: only the newest survives
        (1, vec![&snap3_name]),
        // retaining 0: the minimum to retain is 1, so the newest still survives
        (0, vec![&snap3_name]),
        // retaining 2: the two newest survive
        (2, vec![&snap2_name, &snap3_name]),
        // retaining 3: all three survive
        (3, vec![&snap1_name, &snap2_name, &snap3_name]),
    ];

    for (maximum_full_snapshot_archives_to_retain, expected_snapshots) in cases {
        common_test_purge_old_snapshot_archives(
            &snapshot_names,
            maximum_full_snapshot_archives_to_retain,
            DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
            &expected_snapshots,
        );
    }
}
|
2021-07-01 10:20:56 -07:00
|
|
|
|
2021-08-04 14:42:42 -07:00
|
|
|
/// Mimic a running node's behavior w.r.t. purging old snapshot archives.  Take snapshots in a
/// loop, and periodically purge old snapshot archives.  After purging, check to make sure the
/// snapshot archives on disk are correct.
#[test]
fn test_purge_old_full_snapshot_archives_in_the_loop() {
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let maximum_snapshots_to_retain = 5;
    let starting_slot: Slot = 42;

    for slot in (starting_slot..).take(100) {
        // "Take" a full snapshot for this slot by creating an empty archive file.
        let archive_name = format!("snapshot-{}-{}.tar", slot, Hash::default());
        File::create(full_snapshot_archives_dir.as_ref().join(archive_name)).unwrap();

        // Skip purge-and-check until enough snapshot archives have been created, and then
        // only purge infrequently, so there will always be snapshot archives to purge.
        if slot < starting_slot + maximum_snapshots_to_retain as Slot
            || slot % (maximum_snapshots_to_retain as Slot * 2) != 0
        {
            continue;
        }

        purge_old_snapshot_archives(
            &full_snapshot_archives_dir,
            &incremental_snapshot_archives_dir,
            maximum_snapshots_to_retain,
            usize::MAX,
        );

        // Exactly `maximum_snapshots_to_retain` archives survive, and they are the
        // newest ones: slot, slot-1, slot-2, ...
        let mut full_snapshot_archives = get_full_snapshot_archives(&full_snapshot_archives_dir);
        full_snapshot_archives.sort_unstable();
        assert_eq!(full_snapshot_archives.len(), maximum_snapshots_to_retain);
        assert_eq!(full_snapshot_archives.last().unwrap().slot(), slot);
        for (i, full_snapshot_archive) in full_snapshot_archives.iter().rev().enumerate() {
            assert_eq!(full_snapshot_archive.slot(), slot - i as Slot);
        }
    }
}
|
|
|
|
|
2021-07-01 10:20:56 -07:00
|
|
|
/// Create interleaved full and incremental snapshot archives, purge, then verify both the
/// count and the exact slots of the survivors.
#[test]
fn test_purge_old_incremental_snapshot_archives() {
    solana_logger::setup();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let starting_slot = 100_000;

    let maximum_incremental_snapshot_archives_to_retain =
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN;
    let maximum_full_snapshot_archives_to_retain = DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN;

    // Create twice as many incremental snapshots per full snapshot (and twice as many full
    // snapshots) as will be retained, so the purge has work to do in both dimensions.
    let incremental_snapshot_interval = 100;
    let num_incremental_snapshots_per_full_snapshot =
        maximum_incremental_snapshot_archives_to_retain * 2;
    let full_snapshot_interval =
        incremental_snapshot_interval * num_incremental_snapshots_per_full_snapshot;

    let mut snapshot_filenames = vec![];
    (starting_slot..)
        .step_by(full_snapshot_interval)
        .take(maximum_full_snapshot_archives_to_retain * 2)
        .for_each(|full_snapshot_slot| {
            let snapshot_filename =
                format!("snapshot-{}-{}.tar", full_snapshot_slot, Hash::default());
            let snapshot_path = full_snapshot_archives_dir.path().join(&snapshot_filename);
            File::create(snapshot_path).unwrap();
            snapshot_filenames.push(snapshot_filename);

            // `skip(1)`: the first step coincides with the full snapshot slot itself, and an
            // incremental snapshot at its own base slot would be meaningless.
            (full_snapshot_slot..)
                .step_by(incremental_snapshot_interval)
                .take(num_incremental_snapshots_per_full_snapshot)
                .skip(1)
                .for_each(|incremental_snapshot_slot| {
                    let snapshot_filename = format!(
                        "incremental-snapshot-{}-{}-{}.tar",
                        full_snapshot_slot,
                        incremental_snapshot_slot,
                        Hash::default()
                    );
                    let snapshot_path = incremental_snapshot_archives_dir
                        .path()
                        .join(&snapshot_filename);
                    File::create(snapshot_path).unwrap();
                    snapshot_filenames.push(snapshot_filename);
                });
        });

    purge_old_snapshot_archives(
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        maximum_full_snapshot_archives_to_retain,
        maximum_incremental_snapshot_archives_to_retain,
    );

    // Ensure correct number of full snapshot archives are purged/retained
    let mut remaining_full_snapshot_archives =
        get_full_snapshot_archives(full_snapshot_archives_dir.path());
    assert_eq!(
        remaining_full_snapshot_archives.len(),
        maximum_full_snapshot_archives_to_retain,
    );
    remaining_full_snapshot_archives.sort_unstable();
    let latest_full_snapshot_archive_slot =
        remaining_full_snapshot_archives.last().unwrap().slot();

    // Ensure correct number of incremental snapshot archives are purged/retained
    let mut remaining_incremental_snapshot_archives =
        get_incremental_snapshot_archives(incremental_snapshot_archives_dir.path());
    assert_eq!(
        remaining_incremental_snapshot_archives.len(),
        maximum_incremental_snapshot_archives_to_retain
            + maximum_full_snapshot_archives_to_retain.saturating_sub(1)
    );
    remaining_incremental_snapshot_archives.sort_unstable();
    remaining_incremental_snapshot_archives.reverse();

    // Ensure there exists one incremental snapshot for all but the latest full snapshot
    // (comment fixed: previously read "snapshot all but", dropping the "for")
    for i in (1..maximum_full_snapshot_archives_to_retain).rev() {
        let incremental_snapshot_archive =
            remaining_incremental_snapshot_archives.pop().unwrap();

        let expected_base_slot =
            latest_full_snapshot_archive_slot - (i * full_snapshot_interval) as u64;
        assert_eq!(incremental_snapshot_archive.base_slot(), expected_base_slot);
        let expected_slot = expected_base_slot
            + (full_snapshot_interval - incremental_snapshot_interval) as u64;
        assert_eq!(incremental_snapshot_archive.slot(), expected_slot);
    }

    // Ensure all remaining incremental snapshots are only for the latest full snapshot
    for incremental_snapshot_archive in &remaining_incremental_snapshot_archives {
        assert_eq!(
            incremental_snapshot_archive.base_slot(),
            latest_full_snapshot_archive_slot
        );
    }

    // Ensure the remaining incremental snapshots are at the right slot
    // (local renamed: was `expected_remaing_...`, a typo for "remaining")
    let expected_remaining_incremental_snapshot_archive_slots =
        (latest_full_snapshot_archive_slot..)
            .step_by(incremental_snapshot_interval)
            .take(num_incremental_snapshots_per_full_snapshot)
            .skip(
                num_incremental_snapshots_per_full_snapshot
                    - maximum_incremental_snapshot_archives_to_retain,
            )
            .collect::<HashSet<_>>();

    let actual_remaining_incremental_snapshot_archive_slots =
        remaining_incremental_snapshot_archives
            .iter()
            .map(|snapshot| snapshot.slot())
            .collect::<HashSet<_>>();
    assert_eq!(
        actual_remaining_incremental_snapshot_archive_slots,
        expected_remaining_incremental_snapshot_archive_slots
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_purge_all_incremental_snapshot_archives_when_no_full_snapshot_archives() {
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();

    // Create incremental archives based on full snapshot slots 100 and 200, at slots
    // base+20, base+40, base+60, base+80 — but create no full snapshot archives at all.
    for base_slot in [100, 200] {
        for offset in (20..=80).step_by(20) {
            let snapshot_filename = format!(
                "incremental-snapshot-{}-{}-{}.tar",
                base_slot,
                base_slot + offset,
                Hash::default()
            );
            let snapshot_path = incremental_snapshot_archives_dir
                .path()
                .join(snapshot_filename);
            File::create(snapshot_path).unwrap();
        }
    }

    purge_old_snapshot_archives(
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        usize::MAX,
        usize::MAX,
    );

    // With no full snapshot archives present, every incremental archive is purged —
    // even though the retention limits are effectively unlimited.
    let remaining_incremental_snapshot_archives =
        get_incremental_snapshot_archives(incremental_snapshot_archives_dir.path());
    assert!(remaining_incremental_snapshot_archives.is_empty());
}
|
|
|
|
|
|
|
|
/// Test roundtrip of bank to a full snapshot, then back again.  This test creates the simplest
/// bank possible, so the contents of the snapshot archive will be quite minimal.
#[test]
fn test_roundtrip_bank_to_and_from_full_snapshot_simple() {
    solana_logger::setup();
    let genesis_config = GenesisConfig::default();
    let original_bank = Bank::new_for_tests(&genesis_config);

    // Tick the bank until it reports itself complete, so it is in a snapshottable state.
    while !original_bank.is_complete() {
        original_bank.register_tick(&Hash::new_unique());
    }

    let accounts_dir = tempfile::TempDir::new().unwrap();
    let bank_snapshots_dir = tempfile::TempDir::new().unwrap();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let snapshot_archive_format = ArchiveFormat::Tar;

    // Serialize the bank into a full snapshot archive.
    let snapshot_archive_info = bank_to_full_snapshot_archive(
        &bank_snapshots_dir,
        &original_bank,
        None, // NOTE(review): presumably the snapshot version — confirm against the signature
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Rebuild a bank from that archive alone (no incremental snapshot involved).
    let (roundtrip_bank, _) = bank_from_snapshot_archives(
        &[PathBuf::from(accounts_dir.path())],
        bank_snapshots_dir.path(),
        &snapshot_archive_info,
        None, // NOTE(review): presumably the incremental snapshot archive info — confirm
        &genesis_config,
        &RuntimeConfig::default(),
        None,
        None,
        AccountSecondaryIndexes::default(),
        false,
        None,
        AccountShrinkThreshold::default(),
        false,
        false,
        false,
        Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
        None,
        &Arc::default(),
    )
    .unwrap();

    // The deserialized bank must compare equal to the bank that was archived.
    assert_eq!(original_bank, roundtrip_bank);
}
|
|
|
|
|
2021-07-22 12:40:37 -07:00
|
|
|
/// Test roundtrip of bank to a full snapshot, then back again.  This test is more involved
/// than the simple version above; creating multiple banks over multiple slots and doing
/// multiple transfers.  So this full snapshot should contain more data.
#[test]
fn test_roundtrip_bank_to_and_from_snapshot_complex() {
    solana_logger::setup();
    let collector = Pubkey::new_unique();
    let key1 = Keypair::new();
    let key2 = Keypair::new();
    let key3 = Keypair::new();
    let key4 = Keypair::new();
    let key5 = Keypair::new();

    // Slot 0: fund three accounts, then tick the bank to completion.
    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.));
    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    bank0
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    bank0
        .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey())
        .unwrap();
    bank0
        .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey())
        .unwrap();
    while !bank0.is_complete() {
        bank0.register_tick(&Hash::new_unique());
    }

    // Slot 1: more transfers, touching two new accounts.
    let slot = 1;
    let bank1 = Arc::new(Bank::new_from_parent(&bank0, &collector, slot));
    bank1
        .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey())
        .unwrap();
    bank1
        .transfer(sol_to_lamports(4.), &mint_keypair, &key4.pubkey())
        .unwrap();
    bank1
        .transfer(sol_to_lamports(5.), &mint_keypair, &key5.pubkey())
        .unwrap();
    while !bank1.is_complete() {
        bank1.register_tick(&Hash::new_unique());
    }

    // Slots 2-4: one small transfer per slot, each bank ticked to completion.
    let slot = slot + 1;
    let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot));
    bank2
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    while !bank2.is_complete() {
        bank2.register_tick(&Hash::new_unique());
    }

    let slot = slot + 1;
    let bank3 = Arc::new(Bank::new_from_parent(&bank2, &collector, slot));
    bank3
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    while !bank3.is_complete() {
        bank3.register_tick(&Hash::new_unique());
    }

    let slot = slot + 1;
    let bank4 = Arc::new(Bank::new_from_parent(&bank3, &collector, slot));
    bank4
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    while !bank4.is_complete() {
        bank4.register_tick(&Hash::new_unique());
    }

    let accounts_dir = tempfile::TempDir::new().unwrap();
    let bank_snapshots_dir = tempfile::TempDir::new().unwrap();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let snapshot_archive_format = ArchiveFormat::TarGzip;

    // Archive the newest bank (bank4) into a full snapshot.
    let full_snapshot_archive_info = bank_to_full_snapshot_archive(
        bank_snapshots_dir.path(),
        &bank4,
        None, // NOTE(review): presumably the snapshot version — confirm against the signature
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Rebuild a bank from the full snapshot archive alone.
    let (roundtrip_bank, _) = bank_from_snapshot_archives(
        &[PathBuf::from(accounts_dir.path())],
        bank_snapshots_dir.path(),
        &full_snapshot_archive_info,
        None, // NOTE(review): presumably the incremental snapshot archive info — confirm
        &genesis_config,
        &RuntimeConfig::default(),
        None,
        None,
        AccountSecondaryIndexes::default(),
        false,
        None,
        AccountShrinkThreshold::default(),
        false,
        false,
        false,
        Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
        None,
        &Arc::default(),
    )
    .unwrap();

    // The deserialized bank must compare equal to bank4.
    assert_eq!(*bank4, roundtrip_bank);
}
|
|
|
|
|
|
|
|
/// Test roundtrip of bank to snapshots, then back again, with incremental snapshots. In this
/// version, build up a few slots and take a full snapshot. Continue on a few more slots and
/// take an incremental snapshot. Rebuild the bank from both the incremental snapshot and full
/// snapshot.
///
/// For the full snapshot, touch all the accounts, but only one for the incremental snapshot.
/// This is intended to mimic the real behavior of transactions, where only a small number of
/// accounts are modified often, which are captured by the incremental snapshot. The majority
/// of the accounts are not modified often, and are captured by the full snapshot.
#[test]
fn test_roundtrip_bank_to_and_from_incremental_snapshot() {
    solana_logger::setup();
    let collector = Pubkey::new_unique();
    let key1 = Keypair::new();
    let key2 = Keypair::new();
    let key3 = Keypair::new();
    let key4 = Keypair::new();
    let key5 = Keypair::new();

    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.));

    // Advance a bank to its boundary by registering unique ticks until it is complete.
    let fill_bank_with_ticks = |bank: &Bank| {
        while !bank.is_complete() {
            bank.register_tick(&Hash::new_unique());
        }
    };

    // Slot 0: touch keys 1-3 so the full snapshot will capture them.
    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    for (sol, key) in [(1., &key1), (2., &key2), (3., &key3)] {
        bank0
            .transfer(sol_to_lamports(sol), &mint_keypair, &key.pubkey())
            .unwrap();
    }
    fill_bank_with_ticks(&bank0);

    // Slot 1: touch keys 3-5.
    let slot = 1;
    let bank1 = Arc::new(Bank::new_from_parent(&bank0, &collector, slot));
    for (sol, key) in [(3., &key3), (4., &key4), (5., &key5)] {
        bank1
            .transfer(sol_to_lamports(sol), &mint_keypair, &key.pubkey())
            .unwrap();
    }
    fill_bank_with_ticks(&bank1);

    let accounts_dir = tempfile::TempDir::new().unwrap();
    let bank_snapshots_dir = tempfile::TempDir::new().unwrap();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let snapshot_archive_format = ArchiveFormat::TarZstd;

    // Take the full snapshot at slot 1.
    let full_snapshot_slot = slot;
    let full_snapshot_archive_info = bank_to_full_snapshot_archive(
        bank_snapshots_dir.path(),
        &bank1,
        None,
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Slots 2-4: only key1 is modified, mimicking a "hot" account that the
    // incremental snapshot should capture.
    let slot = slot + 1;
    let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot));
    bank2
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank2);

    let slot = slot + 1;
    let bank3 = Arc::new(Bank::new_from_parent(&bank2, &collector, slot));
    bank3
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank3);

    let slot = slot + 1;
    let bank4 = Arc::new(Bank::new_from_parent(&bank3, &collector, slot));
    bank4
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank4);

    // Take the incremental snapshot at slot 4, based on the full snapshot at slot 1.
    let incremental_snapshot_archive_info = bank_to_incremental_snapshot_archive(
        bank_snapshots_dir.path(),
        &bank4,
        full_snapshot_slot,
        None,
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Rebuild from full + incremental archives and verify the result equals bank4.
    let (roundtrip_bank, _) = bank_from_snapshot_archives(
        &[PathBuf::from(accounts_dir.path())],
        bank_snapshots_dir.path(),
        &full_snapshot_archive_info,
        Some(&incremental_snapshot_archive_info),
        &genesis_config,
        &RuntimeConfig::default(),
        None,
        None,
        AccountSecondaryIndexes::default(),
        false,
        None,
        AccountShrinkThreshold::default(),
        false,
        false,
        false,
        Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
        None,
        &Arc::default(),
    )
    .unwrap();

    assert_eq!(*bank4, roundtrip_bank);
}
|
2021-08-06 18:16:06 -07:00
|
|
|
|
|
|
|
/// Test rebuilding bank from the latest snapshot archives
#[test]
fn test_bank_from_latest_snapshot_archives() {
    solana_logger::setup();
    let collector = Pubkey::new_unique();
    let key1 = Keypair::new();
    let key2 = Keypair::new();
    let key3 = Keypair::new();

    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.));

    // Advance a bank to its boundary by registering unique ticks until it is complete.
    let fill_bank_with_ticks = |bank: &Bank| {
        while !bank.is_complete() {
            bank.register_tick(&Hash::new_unique());
        }
    };

    // Slot 0 and slot 1 both touch all three keys.
    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    for (sol, key) in [(1., &key1), (2., &key2), (3., &key3)] {
        bank0
            .transfer(sol_to_lamports(sol), &mint_keypair, &key.pubkey())
            .unwrap();
    }
    fill_bank_with_ticks(&bank0);

    let slot = 1;
    let bank1 = Arc::new(Bank::new_from_parent(&bank0, &collector, slot));
    for (sol, key) in [(1., &key1), (2., &key2), (3., &key3)] {
        bank1
            .transfer(sol_to_lamports(sol), &mint_keypair, &key.pubkey())
            .unwrap();
    }
    fill_bank_with_ticks(&bank1);

    let accounts_dir = tempfile::TempDir::new().unwrap();
    let bank_snapshots_dir = tempfile::TempDir::new().unwrap();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let snapshot_archive_format = ArchiveFormat::Tar;

    // Full snapshot at slot 1. The returned archive info is not kept: the rebuild
    // below discovers the latest archives on disk by itself.
    let full_snapshot_slot = slot;
    bank_to_full_snapshot_archive(
        &bank_snapshots_dir,
        &bank1,
        None,
        &full_snapshot_archives_dir,
        &incremental_snapshot_archives_dir,
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Slots 2-4 each touch a single key.
    let slot = slot + 1;
    let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot));
    bank2
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank2);

    let slot = slot + 1;
    let bank3 = Arc::new(Bank::new_from_parent(&bank2, &collector, slot));
    bank3
        .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank3);

    let slot = slot + 1;
    let bank4 = Arc::new(Bank::new_from_parent(&bank3, &collector, slot));
    bank4
        .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank4);

    // Incremental snapshot at slot 4, based on the full snapshot at slot 1.
    bank_to_incremental_snapshot_archive(
        &bank_snapshots_dir,
        &bank4,
        full_snapshot_slot,
        None,
        &full_snapshot_archives_dir,
        &incremental_snapshot_archives_dir,
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Rebuild from whatever archives are latest on disk and verify it matches bank4.
    let (deserialized_bank, ..) = bank_from_latest_snapshot_archives(
        &bank_snapshots_dir,
        &full_snapshot_archives_dir,
        &incremental_snapshot_archives_dir,
        &[accounts_dir.as_ref().to_path_buf()],
        &genesis_config,
        &RuntimeConfig::default(),
        None,
        None,
        AccountSecondaryIndexes::default(),
        false,
        None,
        AccountShrinkThreshold::default(),
        false,
        false,
        false,
        Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
        None,
        &Arc::default(),
    )
    .unwrap();

    assert_eq!(deserialized_bank, *bank4);
}
|
2021-08-12 13:56:08 -07:00
|
|
|
|
|
|
|
/// Test that cleaning works well in the edge cases of zero-lamport accounts and snapshots.
/// Here's the scenario:
///
/// slot 1:
///     - send some lamports to Account1 (from Account2) to bring it to life
///     - take a full snapshot
/// slot 2:
///     - make Account1 have zero lamports (send back to Account2)
///     - take an incremental snapshot
///     - ensure deserializing from this snapshot is equal to this bank
/// slot 3:
///     - remove Account2's reference back to slot 2 by transfering from the mint to Account2
/// slot 4:
///     - ensure `clean_accounts()` has run and that Account1 is gone
///     - take another incremental snapshot
///     - ensure deserializing from this snapshots is equal to this bank
///     - ensure Account1 hasn't come back from the dead
///
/// The check at slot 4 will fail with the pre-incremental-snapshot cleaning logic. Because
/// of the cleaning/purging at slot 4, the incremental snapshot at slot 4 will no longer have
/// information about Account1, but the full snapshost _does_ have info for Account1, which is
/// no longer correct!
#[test]
fn test_incremental_snapshots_handle_zero_lamport_accounts() {
    solana_logger::setup();

    let collector = Pubkey::new_unique();
    let key1 = Keypair::new();
    let key2 = Keypair::new();

    let accounts_dir = tempfile::TempDir::new().unwrap();
    let bank_snapshots_dir = tempfile::TempDir::new().unwrap();
    let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap();
    let snapshot_archive_format = ArchiveFormat::Tar;

    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.));

    // Advance a bank to its boundary by registering unique ticks until it is complete.
    let fill_bank_with_ticks = |bank: &Bank| {
        while !bank.is_complete() {
            bank.register_tick(&Hash::new_unique());
        }
    };

    // Slot 0: fund Account2 from the mint.
    let lamports_to_transfer = sol_to_lamports(123_456.);
    let bank0 = Arc::new(Bank::new_with_paths_for_tests(
        &genesis_config,
        Arc::<RuntimeConfig>::default(),
        vec![accounts_dir.path().to_path_buf()],
        AccountSecondaryIndexes::default(),
        false,
        AccountShrinkThreshold::default(),
    ));
    bank0
        .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank0);

    // Slot 1: bring Account1 to life, then take a full snapshot.
    let slot = 1;
    let bank1 = Arc::new(Bank::new_from_parent(&bank0, &collector, slot));
    bank1
        .transfer(lamports_to_transfer, &key2, &key1.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank1);

    let full_snapshot_slot = slot;
    let full_snapshot_archive_info = bank_to_full_snapshot_archive(
        bank_snapshots_dir.path(),
        &bank1,
        None,
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Slot 2: drain Account1 back to Account2. A throwaway sanitized transaction is
    // built first only to compute the fee, then the real transfer sends balance-minus-fee
    // so Account1 lands on exactly zero lamports.
    let slot = slot + 1;
    let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot));
    let blockhash = bank2.last_blockhash();
    let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
        &key1,
        &key2.pubkey(),
        lamports_to_transfer,
        blockhash,
    ));
    let fee = bank2.get_fee_for_message(tx.message()).unwrap();
    let tx = system_transaction::transfer(
        &key1,
        &key2.pubkey(),
        lamports_to_transfer - fee,
        blockhash,
    );
    bank2.process_transaction(&tx).unwrap();
    assert_eq!(
        bank2.get_balance(&key1.pubkey()),
        0,
        "Ensure Account1's balance is zero"
    );
    fill_bank_with_ticks(&bank2);

    // Take an incremental snapshot and then do a roundtrip on the bank and ensure it
    // deserializes correctly.
    let incremental_snapshot_archive_info = bank_to_incremental_snapshot_archive(
        bank_snapshots_dir.path(),
        &bank2,
        full_snapshot_slot,
        None,
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();
    let (deserialized_bank, _) = bank_from_snapshot_archives(
        &[accounts_dir.path().to_path_buf()],
        bank_snapshots_dir.path(),
        &full_snapshot_archive_info,
        Some(&incremental_snapshot_archive_info),
        &genesis_config,
        &RuntimeConfig::default(),
        None,
        None,
        AccountSecondaryIndexes::default(),
        false,
        None,
        AccountShrinkThreshold::default(),
        false,
        false,
        false,
        Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
        None,
        &Arc::default(),
    )
    .unwrap();
    assert_eq!(
        deserialized_bank, *bank2,
        "Ensure rebuilding from an incremental snapshot works"
    );

    // Slot 3: update Account2 so that it no longer holds a reference to slot 2.
    let slot = slot + 1;
    let bank3 = Arc::new(Bank::new_from_parent(&bank2, &collector, slot));
    bank3
        .transfer(lamports_to_transfer, &mint_keypair, &key2.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank3);

    // Slot 4: no transactions; just advance so we can root and clean.
    let slot = slot + 1;
    let bank4 = Arc::new(Bank::new_from_parent(&bank3, &collector, slot));
    fill_bank_with_ticks(&bank4);

    // Ensure account1 has been cleaned/purged from everywhere
    bank4.squash();
    bank4.clean_accounts(Some(full_snapshot_slot));
    assert!(
        bank4.get_account_modified_slot(&key1.pubkey()).is_none(),
        "Ensure Account1 has been cleaned and purged from AccountsDb"
    );

    // Take an incremental snapshot and then do a roundtrip on the bank and ensure it
    // deserializes correctly
    let incremental_snapshot_archive_info = bank_to_incremental_snapshot_archive(
        bank_snapshots_dir.path(),
        &bank4,
        full_snapshot_slot,
        None,
        full_snapshot_archives_dir.path(),
        incremental_snapshot_archives_dir.path(),
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    let (deserialized_bank, _) = bank_from_snapshot_archives(
        &[accounts_dir.path().to_path_buf()],
        bank_snapshots_dir.path(),
        &full_snapshot_archive_info,
        Some(&incremental_snapshot_archive_info),
        &genesis_config,
        &RuntimeConfig::default(),
        None,
        None,
        AccountSecondaryIndexes::default(),
        false,
        None,
        AccountShrinkThreshold::default(),
        false,
        false,
        false,
        Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
        None,
        &Arc::default(),
    )
    .unwrap();
    assert_eq!(
        deserialized_bank, *bank4,
        "Ensure rebuilding from an incremental snapshot works",
    );
    assert!(
        deserialized_bank
            .get_account_modified_slot(&key1.pubkey())
            .is_none(),
        "Ensure Account1 has not been brought back from the dead"
    );
}
|
2022-04-06 03:47:19 -07:00
|
|
|
|
2022-07-06 15:30:30 -07:00
|
|
|
/// Test that only the bank fields can be deserialized from the latest snapshot archives.
#[test]
fn test_bank_fields_from_snapshot() {
    solana_logger::setup();
    let collector = Pubkey::new_unique();
    let key1 = Keypair::new();

    let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.));

    // Advance a bank to its boundary by registering unique ticks until it is complete.
    let fill_bank_with_ticks = |bank: &Bank| {
        while !bank.is_complete() {
            bank.register_tick(&Hash::new_unique());
        }
    };

    let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
    fill_bank_with_ticks(&bank0);

    let slot = 1;
    let bank1 = Arc::new(Bank::new_from_parent(&bank0, &collector, slot));
    fill_bank_with_ticks(&bank1);

    // One directory serves as the bank snapshots dir and both archive dirs.
    let all_snapshots_dir = tempfile::TempDir::new().unwrap();
    let snapshot_archive_format = ArchiveFormat::Tar;

    // Full snapshot at slot 1.
    let full_snapshot_slot = slot;
    bank_to_full_snapshot_archive(
        &all_snapshots_dir,
        &bank1,
        None,
        &all_snapshots_dir,
        &all_snapshots_dir,
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Slot 2: a single transfer, then an incremental snapshot.
    let slot = slot + 1;
    let bank2 = Arc::new(Bank::new_from_parent(&bank1, &collector, slot));
    bank2
        .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey())
        .unwrap();
    fill_bank_with_ticks(&bank2);

    bank_to_incremental_snapshot_archive(
        &all_snapshots_dir,
        &bank2,
        full_snapshot_slot,
        None,
        &all_snapshots_dir,
        &all_snapshots_dir,
        snapshot_archive_format,
        DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN,
        DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN,
    )
    .unwrap();

    // Deserialize just the bank fields and check them against the live bank.
    let bank_fields = bank_fields_from_snapshot_archives(
        &all_snapshots_dir,
        &all_snapshots_dir,
        &all_snapshots_dir,
    )
    .unwrap();
    assert_eq!(bank_fields.slot, bank2.slot());
    assert_eq!(bank_fields.parent_slot, bank2.parent_slot());
}
|
|
|
|
|
2022-08-18 06:48:58 -07:00
|
|
|
#[test]
fn test_verify_slot_deltas_structural_good() {
    // NOTE: slot deltas do not need to be sorted
    let slot_deltas: Vec<_> = vec![222, 333, 111]
        .into_iter()
        .map(|slot| (slot, true, Status::default()))
        .collect();

    let bank_slot = 333;
    assert_eq!(
        verify_slot_deltas_structural(&slot_deltas, bank_slot),
        Ok(VerifySlotDeltasStructuralInfo {
            slots: HashSet::from([111, 222, 333])
        })
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_structural_bad_too_many_entries() {
    // One more entry than the status cache permits.
    let bank_slot = status_cache::MAX_CACHE_ENTRIES as Slot + 1;
    let slot_deltas: Vec<_> = (0..bank_slot)
        .map(|slot| (slot, true, Status::default()))
        .collect();

    assert_eq!(
        verify_slot_deltas_structural(&slot_deltas, bank_slot),
        Err(VerifySlotDeltasError::TooManyEntries(
            status_cache::MAX_CACHE_ENTRIES + 1,
            status_cache::MAX_CACHE_ENTRIES
        )),
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_structural_bad_slot_not_root() {
    let slot_deltas = vec![
        (111, true, Status::default()),
        (222, false, Status::default()), // <-- slot is not a root
        (333, true, Status::default()),
    ];

    let bank_slot = 333;
    assert_eq!(
        verify_slot_deltas_structural(&slot_deltas, bank_slot),
        Err(VerifySlotDeltasError::SlotIsNotRoot(222))
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_structural_bad_slot_greater_than_bank() {
    let slot_deltas = vec![
        (222, true, Status::default()),
        (111, true, Status::default()),
        (555, true, Status::default()), // <-- slot is greater than the bank slot
    ];

    let bank_slot = 444;
    assert_eq!(
        verify_slot_deltas_structural(&slot_deltas, bank_slot),
        Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot(
            555, bank_slot
        )),
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_structural_bad_slot_has_multiple_entries() {
    let slot_deltas = vec![
        (111, true, Status::default()),
        (222, true, Status::default()),
        (111, true, Status::default()), // <-- slot is a duplicate
    ];

    let bank_slot = 222;
    assert_eq!(
        verify_slot_deltas_structural(&slot_deltas, bank_slot),
        Err(VerifySlotDeltasError::SlotHasMultipleEntries(111)),
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_with_history_good() {
    let mut slots_from_slot_deltas = HashSet::default();
    let mut slot_history = SlotHistory::default();
    // note: slot history expects slots to be added in numeric order
    for slot in [0, 111, 222, 333, 444] {
        slots_from_slot_deltas.insert(slot);
        slot_history.add(slot);
    }

    let bank_slot = 444;
    assert_eq!(
        verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot),
        Ok(())
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_with_history_bad_slot_history() {
    let bank_slot = 444;
    let result = verify_slot_deltas_with_history(
        &HashSet::default(),
        &SlotHistory::default(), // <-- will only have an entry for slot 0
        bank_slot,
    );
    assert_eq!(result, Err(VerifySlotDeltasError::BadSlotHistory));
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_with_history_bad_slot_not_in_history() {
    let slots_from_slot_deltas = HashSet::from([
        0, // slot history has slot 0 added by default
        444, 222,
    ]);
    let mut slot_history = SlotHistory::default();
    slot_history.add(444); // <-- slot history is missing slot 222

    let bank_slot = 444;
    assert_eq!(
        verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot),
        Err(VerifySlotDeltasError::SlotNotFoundInHistory(222)),
    );
}
|
|
|
|
|
|
|
|
#[test]
fn test_verify_slot_deltas_with_history_bad_slot_not_in_deltas() {
    let slots_from_slot_deltas = HashSet::from([
        0, // slot history has slot 0 added by default
        444, 222,
        // <-- slot deltas is missing slot 333
    ]);
    let mut slot_history = SlotHistory::default();
    for slot in [222, 333, 444] {
        slot_history.add(slot);
    }

    let bank_slot = 444;
    assert_eq!(
        verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot),
        Err(VerifySlotDeltasError::SlotNotFoundInDeltas(333)),
    );
}
|
2020-01-09 16:49:36 -08:00
|
|
|
}
|