Remove circular dependencies in blocktree (#6494)

* Delete dead code

* Flatten modules

* Break blocktree dependency cycle

* Move BlocktreeError into blocktree_db

Fewer dependency cycles

* Inline column family names

Fewer circular dependencies

* Cleanup imports

* Fix build
This commit is contained in:
Greg Fitzgerald 2019-10-22 09:20:19 -06:00 committed by GitHub
parent 75d68edfe7
commit 45b2c138e5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 82 additions and 181 deletions

View File

@ -1,49 +1,39 @@
//! The `blocktree` module provides functions for parallel verification of the
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.
use crate::blocktree_db::{self, columns as cf, Column, IteratorDirection, IteratorMode};
pub use crate::blocktree_db::{BlocktreeError, Result};
pub use crate::blocktree_meta::SlotMeta;
use crate::blocktree_meta::*;
use crate::entry::{create_ticks, Entry};
use crate::erasure::ErasureConfig;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::shred::{Shred, Shredder};
use bincode::deserialize;
use log::*;
use rayon::iter::IntoParallelRefIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use rocksdb;
use rocksdb::DBRawIterator;
use solana_metrics::{datapoint_debug, datapoint_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::clock::Slot;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::cell::RefCell;
use std::cmp;
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::result;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError};
use std::sync::{Arc, RwLock};
pub use self::meta::*;
use crate::leader_schedule_cache::LeaderScheduleCache;
use solana_sdk::clock::Slot;
mod db;
mod meta;
use db::{columns as cf, Column, IteratorDirection, IteratorMode};
use rocksdb::DBRawIterator;
type Database = db::Database;
type LedgerColumn<C> = db::LedgerColumn<C>;
type WriteBatch = db::WriteBatch;
type BatchProcessor = db::BatchProcessor;
type Database = blocktree_db::Database;
type LedgerColumn<C> = blocktree_db::LedgerColumn<C>;
type WriteBatch = blocktree_db::WriteBatch;
type BatchProcessor = blocktree_db::BatchProcessor;
pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";
@ -57,37 +47,6 @@ pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;
pub type SlotMetaWorkingSetEntry = (Rc<RefCell<SlotMeta>>, Option<SlotMeta>);
pub type CompletedSlotsReceiver = Receiver<Vec<u64>>;
#[derive(Debug)]
/// Error type for blocktree ledger operations.
/// NOTE(review): this commit moves this definition into blocktree_db (see the
/// identical enum added there below); this is the copy removed from blocktree.rs.
pub enum BlocktreeError {
// A shred already exists at the target index — presumably a duplicate
// insert; confirm against the insert path.
ShredForIndexExists,
// Shred payload failed bincode deserialization.
InvalidShredData(Box<bincode::ErrorKind>),
// Wraps an underlying RocksDB error.
RocksDb(rocksdb::Error),
// The requested slot has not been rooted yet.
SlotNotRooted,
// Wraps std::io::Error (converted via the From impl below).
IO(std::io::Error),
// Wraps a bincode (de)serialization error (converted via the From impl below).
Serialize(std::boxed::Box<bincode::ErrorKind>),
}
/// Convenience alias used throughout the blocktree APIs.
pub type Result<T> = result::Result<T, BlocktreeError>;
impl std::error::Error for BlocktreeError {}
impl std::fmt::Display for BlocktreeError {
// Coarse Display: every variant renders as the same static string;
// use the Debug representation when variant detail is needed.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "blocktree error")
}
}
// Enables `?` on std::io results inside blocktree code.
impl std::convert::From<std::io::Error> for BlocktreeError {
fn from(e: std::io::Error) -> BlocktreeError {
BlocktreeError::IO(e)
}
}
// Enables `?` on bincode serialize/deserialize results.
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for BlocktreeError {
fn from(e: std::boxed::Box<bincode::ErrorKind>) -> BlocktreeError {
BlocktreeError::Serialize(e)
}
}
// ledger window
pub struct Blocktree {
db: Arc<Database>,
@ -104,22 +63,6 @@ pub struct Blocktree {
pub completed_slots_senders: Vec<SyncSender<Vec<u64>>>,
}
// Column family for metadata about a leader slot
pub const META_CF: &str = "meta";
// Column family for slots that have been marked as dead
pub const DEAD_SLOTS_CF: &str = "dead_slots";
pub const ERASURE_META_CF: &str = "erasure_meta";
// Column family for orphans data
pub const ORPHANS_CF: &str = "orphans";
// Column family for root data
pub const ROOT_CF: &str = "root";
/// Column family for indexes
pub const INDEX_CF: &str = "index";
/// Column family for Data Shreds
pub const DATA_SHRED_CF: &str = "data_shred";
/// Column family for Code Shreds
pub const CODE_SHRED_CF: &str = "code_shred";
impl Blocktree {
/// Opens a Ledger in directory, provides "infinite" window of shreds
pub fn open(ledger_path: &Path) -> Result<Blocktree> {
@ -1644,7 +1587,7 @@ pub fn create_new_ledger(ledger_path: &Path, genesis_block: &GenesisBlock) -> Re
// Fill slot 0 with ticks that link back to the genesis_block to bootstrap the ledger.
let blocktree = Blocktree::open(ledger_path)?;
let entries = crate::entry::create_ticks(ticks_per_slot, genesis_block.hash());
let entries = create_ticks(ticks_per_slot, genesis_block.hash());
let last_hash = entries.last().unwrap().hash;
let shredder = Shredder::new(0, 0, 0.0, Arc::new(Keypair::new()))

View File

@ -1,5 +1,4 @@
use crate::blocktree::{BlocktreeError, Result};
use crate::blocktree_meta;
use bincode::{deserialize, serialize};
use byteorder::{BigEndian, ByteOrder};
use log::*;
@ -25,6 +24,53 @@ const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB
// Column family for metadata about a leader slot
const META_CF: &str = "meta";
// Column family for slots that have been marked as dead
const DEAD_SLOTS_CF: &str = "dead_slots";
const ERASURE_META_CF: &str = "erasure_meta";
// Column family for orphans data
const ORPHANS_CF: &str = "orphans";
// Column family for root data
const ROOT_CF: &str = "root";
/// Column family for indexes
const INDEX_CF: &str = "index";
/// Column family for Data Shreds
const DATA_SHRED_CF: &str = "data_shred";
/// Column family for Code Shreds
const CODE_SHRED_CF: &str = "code_shred";
#[derive(Debug)]
/// Error type for blocktree ledger operations.
/// NOTE(review): moved here (blocktree_db) from blocktree.rs by this commit;
/// blocktree.rs re-exports it, so external callers are unaffected.
pub enum BlocktreeError {
// A shred already exists at the target index — presumably a duplicate
// insert; confirm against the insert path.
ShredForIndexExists,
// Shred payload failed bincode deserialization.
InvalidShredData(Box<bincode::ErrorKind>),
// Wraps an underlying RocksDB error.
RocksDb(rocksdb::Error),
// The requested slot has not been rooted yet.
SlotNotRooted,
// Wraps std::io::Error (converted via the From impl below).
IO(std::io::Error),
// Wraps a bincode (de)serialization error (converted via the From impl below).
Serialize(std::boxed::Box<bincode::ErrorKind>),
}
/// Convenience alias used throughout the blocktree APIs.
/// Fully-qualified std::result here because this module does not
/// `use std::result` like the old blocktree.rs did.
pub type Result<T> = std::result::Result<T, BlocktreeError>;
impl std::error::Error for BlocktreeError {}
impl std::fmt::Display for BlocktreeError {
// Coarse Display: every variant renders as the same static string;
// use the Debug representation when variant detail is needed.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "blocktree error")
}
}
// Enables `?` on std::io results inside blocktree_db code.
impl std::convert::From<std::io::Error> for BlocktreeError {
fn from(e: std::io::Error) -> BlocktreeError {
BlocktreeError::IO(e)
}
}
// Enables `?` on bincode serialize/deserialize results.
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for BlocktreeError {
fn from(e: std::boxed::Box<bincode::ErrorKind>) -> BlocktreeError {
BlocktreeError::Serialize(e)
}
}
pub enum IteratorMode<Index> {
Start,
End,
@ -76,7 +122,7 @@ struct Rocks(rocksdb::DB);
impl Rocks {
fn open(path: &Path) -> Result<Rocks> {
use crate::blocktree::db::columns::{
use columns::{
DeadSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData, SlotMeta,
};
@ -121,7 +167,7 @@ impl Rocks {
}
fn columns(&self) -> Vec<&'static str> {
use crate::blocktree::db::columns::{
use columns::{
DeadSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData, SlotMeta,
};
@ -213,7 +259,7 @@ pub trait TypedColumn: Column {
}
impl Column for columns::ShredCode {
const NAME: &'static str = super::CODE_SHRED_CF;
const NAME: &'static str = CODE_SHRED_CF;
type Index = (u64, u64);
fn key(index: (u64, u64)) -> Vec<u8> {
@ -234,7 +280,7 @@ impl Column for columns::ShredCode {
}
impl Column for columns::ShredData {
const NAME: &'static str = super::DATA_SHRED_CF;
const NAME: &'static str = DATA_SHRED_CF;
type Index = (u64, u64);
fn key((slot, index): (u64, u64)) -> Vec<u8> {
@ -260,7 +306,7 @@ impl Column for columns::ShredData {
}
impl Column for columns::Index {
const NAME: &'static str = super::INDEX_CF;
const NAME: &'static str = INDEX_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
@ -283,11 +329,11 @@ impl Column for columns::Index {
}
impl TypedColumn for columns::Index {
type Type = crate::blocktree::meta::Index;
type Type = blocktree_meta::Index;
}
impl Column for columns::DeadSlots {
const NAME: &'static str = super::DEAD_SLOTS_CF;
const NAME: &'static str = DEAD_SLOTS_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
@ -314,7 +360,7 @@ impl TypedColumn for columns::DeadSlots {
}
impl Column for columns::Orphans {
const NAME: &'static str = super::ORPHANS_CF;
const NAME: &'static str = ORPHANS_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
@ -341,7 +387,7 @@ impl TypedColumn for columns::Orphans {
}
impl Column for columns::Root {
const NAME: &'static str = super::ROOT_CF;
const NAME: &'static str = ROOT_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
@ -368,7 +414,7 @@ impl TypedColumn for columns::Root {
}
impl Column for columns::SlotMeta {
const NAME: &'static str = super::META_CF;
const NAME: &'static str = META_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
@ -391,11 +437,11 @@ impl Column for columns::SlotMeta {
}
impl TypedColumn for columns::SlotMeta {
type Type = super::SlotMeta;
type Type = blocktree_meta::SlotMeta;
}
impl Column for columns::ErasureMeta {
const NAME: &'static str = super::ERASURE_META_CF;
const NAME: &'static str = ERASURE_META_CF;
type Index = (u64, u64);
fn index(key: &[u8]) -> (u64, u64) {
@ -422,7 +468,7 @@ impl Column for columns::ErasureMeta {
}
impl TypedColumn for columns::ErasureMeta {
type Type = super::ErasureMeta;
type Type = blocktree_meta::ErasureMeta;
}
#[derive(Debug, Clone)]
@ -700,7 +746,7 @@ impl std::convert::From<rocksdb::Error> for BlocktreeError {
}
fn get_cf_options(name: &'static str) -> Options {
use crate::blocktree::db::columns::{ErasureMeta, Index, ShredCode, ShredData};
use columns::{ErasureMeta, Index, ShredCode, ShredData};
let mut options = Options::default();
match name {

View File

@ -1,8 +1,7 @@
use crate::erasure::ErasureConfig;
use serde::{Deserialize, Serialize};
use solana_metrics::datapoint;
use std::cmp::Ordering;
use std::{collections::BTreeSet, ops::Range, ops::RangeBounds};
use std::{collections::BTreeSet, ops::RangeBounds};
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
// The Meta column family
@ -31,51 +30,6 @@ pub struct SlotMeta {
pub completed_data_indexes: Vec<u32>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Sorted set of non-overlapping half-open u64 ranges, kept ordered by start.
/// NOTE(review): dead code — deleted by this commit along with its test below.
pub struct ErasureSetRanges {
// Ranges sorted by `start`; `insert` maintains the ordering invariant.
r: Vec<Range<u64>>,
}
impl ErasureSetRanges {
/// Inserts the half-open range [min(start,end), max(start,end)).
/// Returns Ok(position) on success, or Err(existing_range) when `start`
/// (after normalization) already falls inside a stored range.
/// NOTE(review): only the new range's *start* is checked for overlap; a new
/// range whose interior covers an existing range's start is still inserted,
/// producing overlapping entries — confirm whether callers relied on this.
pub fn insert(&mut self, start: u64, end: u64) -> Result<usize, Range<u64>> {
// Normalize backwards arguments so the stored range is always ascending.
let range = if start < end {
(start..end)
} else {
(end..start)
};
match self.pos(range.start) {
Ok(pos) => Err(self.r[pos].clone()),
Err(pos) => {
self.r.insert(pos, range);
Ok(pos)
}
}
}
/// Binary search: Ok(index) of the range containing `seek`, or
/// Err(index) giving the insertion point that keeps `r` sorted.
fn pos(&self, seek: u64) -> Result<usize, usize> {
self.r.binary_search_by(|probe| {
// Treat containment as equality so a hit returns the covering range;
// otherwise order probes by their start.
if probe.contains(&seek) {
Ordering::Equal
} else {
probe.start.cmp(&seek)
}
})
}
/// Returns the stored range containing `seek`, or Err(insertion_index)
/// when no range covers it.
pub fn lookup(&self, seek: u64) -> Result<Range<u64>, usize> {
self.pos(seek)
.map(|pos| self.r[pos].clone())
.or_else(|epos| {
// Defensive re-check of the insertion slot; `pos` should already
// have returned Ok for a containing range — TODO confirm reachable.
if epos < self.r.len() && self.r[epos].contains(&seek) {
Ok(self.r[epos].clone())
} else {
Err(epos)
}
})
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Index recording presence/absence of blobs
pub struct Index {
@ -116,7 +70,7 @@ pub enum ErasureMetaStatus {
}
impl Index {
pub(in crate::blocktree) fn new(slot: u64) -> Self {
pub(crate) fn new(slot: u64) -> Self {
Index {
slot,
data: DataIndex::default(),
@ -220,7 +174,7 @@ impl SlotMeta {
self.parent_slot != std::u64::MAX
}
pub(in crate::blocktree) fn new(slot: u64, parent_slot: u64) -> Self {
pub(crate) fn new(slot: u64, parent_slot: u64) -> Self {
SlotMeta {
slot,
consumed: 0,
@ -350,50 +304,4 @@ mod test {
assert_eq!(e_meta.status(&index), DataFull);
}
}
// Exercises ErasureSetRanges insert/lookup across boundaries; Err(n) is the
// insertion index where a covering range *would* go (half-open: end excluded).
// NOTE(review): deleted by this commit together with ErasureSetRanges above.
#[test]
fn test_erasure_set_ranges() {
let mut ranges = ErasureSetRanges::default();
// Test empty ranges
(0..100 as u64).for_each(|i| {
assert_eq!(ranges.lookup(i), Err(0));
});
// Test adding one range and all boundary condition lookups
assert_eq!(ranges.insert(5, 13), Ok(0));
assert_eq!(ranges.lookup(0), Err(0));
assert_eq!(ranges.lookup(4), Err(0));
assert_eq!(ranges.lookup(5), Ok(5..13));
assert_eq!(ranges.lookup(12), Ok(5..13));
// 13 is excluded: ranges are half-open.
assert_eq!(ranges.lookup(13), Err(1));
assert_eq!(ranges.lookup(100), Err(1));
// Test adding second range (with backwards values) and all boundary condition lookups
// insert(55, 33) normalizes to 33..55.
assert_eq!(ranges.insert(55, 33), Ok(1));
assert_eq!(ranges.lookup(0), Err(0));
assert_eq!(ranges.lookup(4), Err(0));
assert_eq!(ranges.lookup(5), Ok(5..13));
assert_eq!(ranges.lookup(12), Ok(5..13));
assert_eq!(ranges.lookup(13), Err(1));
assert_eq!(ranges.lookup(32), Err(1));
assert_eq!(ranges.lookup(33), Ok(33..55));
assert_eq!(ranges.lookup(54), Ok(33..55));
assert_eq!(ranges.lookup(55), Err(2));
// Add a third range between previous two ranges
assert_eq!(ranges.insert(23, 30), Ok(1));
assert_eq!(ranges.lookup(0), Err(0));
assert_eq!(ranges.lookup(4), Err(0));
assert_eq!(ranges.lookup(5), Ok(5..13));
assert_eq!(ranges.lookup(12), Ok(5..13));
assert_eq!(ranges.lookup(13), Err(1));
assert_eq!(ranges.lookup(23), Ok(23..30));
assert_eq!(ranges.lookup(29), Ok(23..30));
assert_eq!(ranges.lookup(30), Err(2));
assert_eq!(ranges.lookup(32), Err(2));
assert_eq!(ranges.lookup(33), Ok(33..55));
assert_eq!(ranges.lookup(54), Ok(33..55));
assert_eq!(ranges.lookup(55), Err(3));
}
}

View File

@ -1,5 +1,6 @@
use crate::bank_forks::BankForks;
use crate::blocktree::{Blocktree, SlotMeta};
use crate::blocktree::Blocktree;
use crate::blocktree_meta::SlotMeta;
use crate::entry::{create_ticks, Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache;
use log::*;

View File

@ -1,6 +1,8 @@
pub mod bank_forks;
#[macro_use]
pub mod blocktree;
mod blocktree_db;
mod blocktree_meta;
pub mod blocktree_processor;
pub mod entry;
pub mod erasure;

View File

@ -1,4 +1,5 @@
use crate::blocktree::*;
use crate::blocktree_meta::SlotMeta;
use log::*;
pub struct RootedSlotIterator<'a> {