Rename blocktree to blockstore (#7757)

automerge
Greg Fitzgerald 2020-01-13 14:13:52 -07:00 committed by Grimes
parent ef06d165b4
commit b5dba77056
59 changed files with 1616 additions and 1534 deletions

View File

@@ -10,7 +10,7 @@ use solana_core::packet::to_packets_chunked;
 use solana_core::poh_recorder::PohRecorder;
 use solana_core::poh_recorder::WorkingBankEntry;
 use solana_ledger::bank_forks::BankForks;
-use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
 use solana_measure::measure::Measure;
 use solana_runtime::bank::Bank;
 use solana_sdk::hash::Hash;
@@ -139,11 +139,11 @@ fn main() {
 let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
 let ledger_path = get_tmp_ledger_path!();
 {
-let blocktree = Arc::new(
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+let blockstore = Arc::new(
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
 );
 let (exit, poh_recorder, poh_service, signal_receiver) =
-create_test_recorder(&bank, &blocktree, None);
+create_test_recorder(&bank, &blockstore, None);
 let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
 let cluster_info = Arc::new(RwLock::new(cluster_info));
 let banking_stage = BankingStage::new(
@@ -302,5 +302,5 @@ fn main() {
 sleep(Duration::from_secs(1));
 debug!("waited for poh_service");
 }
-let _unused = Blocktree::destroy(&ledger_path);
+let _unused = Blockstore::destroy(&ledger_path);
 }

View File

@@ -18,9 +18,9 @@
 | | `-------` `--------` `--+---------` | | | | |
 | | ^ ^ | | | `------------` |
 | | | | v | | |
-| | | .--+--------. | | |
-| | | | Blocktree | | | |
-| | | `-----------` | | .------------. |
+| | | .--+---------. | | |
+| | | | Blockstore | | | |
+| | | `------------` | | .------------. |
 | | | ^ | | | | |
 | | | | | | | Downstream | |
 | | .--+--. .-------+---. | | | Validators | |

View File

@@ -21,7 +21,7 @@
 * [Anatomy of a Validator](validator/README.md)
 * [TPU](validator/tpu.md)
 * [TVU](validator/tvu/README.md)
-* [Blocktree](validator/tvu/blocktree.md)
+* [Blockstore](validator/tvu/blockstore.md)
 * [Gossip Service](validator/gossip.md)
 * [The Runtime](validator/runtime.md)
 * [Anatomy of a Transaction](transaction.md)
@@ -62,7 +62,7 @@
 * [Block Confirmation](proposals/block-confirmation.md)
 * [ABI Management](proposals/abi-management.md)
 * [Implemented Design Proposals](implemented-proposals/README.md)
-* [Blocktree](implemented-proposals/blocktree.md)
+* [Blockstore](implemented-proposals/blockstore.md)
 * [Cluster Software Installation and Updates](implemented-proposals/installer.md)
 * [Cluster Economics](implemented-proposals/ed_overview/README.md)
 * [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md)

View File

@@ -1,6 +1,6 @@
 # Managing Forks
-The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blocktree_. When the validator interprets the blocktree, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.
+The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blockstore_. When the validator interprets the blockstore, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.
 A validator selects a fork by submitting a vote to a slot leader on that fork. The vote commits the validator for a duration of time called a _lockout period_. The validator is not permitted to vote on a different fork until that lockout period expires. Each subsequent vote on the same fork doubles the length of the lockout period. After some cluster-configured number of votes \(currently 32\), the length of the lockout period reaches what's called _max lockout_. Until the max lockout is reached, the validator has the option to wait until the lockout period is over and then vote on another fork. When it votes on another fork, it performs an operation called _rollback_, whereby the state rolls back in time to a shared checkpoint and then jumps forward to the tip of the fork that it just voted on. The maximum distance that a fork may roll back is called the _rollback depth_. Rollback depth is the number of votes required to achieve max lockout. Whenever a validator votes, any checkpoints beyond the rollback depth become unreachable. That is, there is no scenario in which the validator will need to roll back beyond rollback depth. It therefore may safely _prune_ unreachable forks and _squash_ all checkpoints beyond rollback depth into the root checkpoint.
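For concreteness, here is a minimal Rust sketch of the doubling lockout schedule described above. The constant and function names are illustrative stand-ins, not Solana's actual tower types; the sketch only assumes the stated rule that each confirmation doubles the lockout and that max lockout arrives after 32 votes.

```rust
/// Illustrative stand-in for the cluster-configured vote depth (32).
const MAX_LOCKOUT_HISTORY: u32 = 32;

/// Each confirmation doubles the lockout, so a vote with `confirmations`
/// confirmations is locked out for 2^confirmations slots.
fn lockout_slots(confirmations: u32) -> u64 {
    2u64.saturating_pow(confirmations)
}

fn main() {
    for confirmations in [1, 2, 8, MAX_LOCKOUT_HISTORY] {
        println!(
            "{:2} confirmations -> locked out for {} slots",
            confirmations,
            lockout_slots(confirmations)
        );
    }
    // At MAX_LOCKOUT_HISTORY confirmations the vote reaches max lockout;
    // checkpoints older than this rollback depth can be pruned and squashed.
}
```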

View File

@@ -1,16 +1,16 @@
-# Blocktree
+# Blockstore
-After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.
+After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
-The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
+The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
 Shreds are moved to a fork-able key space: the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist, or when to persist them.
-Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
+Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
-## Functionalities of Blocktree
+## Functionalities of Blockstore
-1. Persistence: the Blocktree lives in the front of the nodes verification
+1. Persistence: the Blockstore lives in the front of the node's verification
 pipeline, right behind network receive and signature verification. If the
@@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out
 2. Repair: repair is the same as window repair above, but able to serve any
-shred that's been received. Blocktree stores shreds with signatures,
+shred that's been received. Blockstore stores shreds with signatures,
 preserving the chain of origination.
-3. Forks: Blocktree supports random access of shreds, so can support a
+3. Forks: Blockstore supports random access of shreds, so can support a
 validator's need to rollback and replay from a Bank checkpoint.
-4. Restart: with proper pruning/culling, the Blocktree can be replayed by
+4. Restart: with proper pruning/culling, the Blockstore can be replayed by
 ordered enumeration of entries from slot 0. The logic of the replay stage
 \(i.e. dealing with forks\) will have to be used for the most recent entries in
-the Blocktree.
+the Blockstore.
-## Blocktree Design
+## Blockstore Design
-1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
+1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
-2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
+2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
 * `slot_index` - The index of this slot
 * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
 * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\).
@@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out
 is\_rooted\(0\) is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
 3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
-4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
+4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
-5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
+5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
-## Blocktree APIs
+## Blockstore APIs
-The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
+The Blockstore offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
 1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`; otherwise, no upper limit on the length of the return vector is imposed.
-Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
+Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
 ## Interfacing with Bank
@@ -80,11 +80,11 @@ The bank exposes to replay stage:
 be able to be chained below this vote
-Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
+Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
-## Pruning Blocktree
+## Pruning Blockstore
-Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged.
+Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned, expunged.
 Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.
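To make the key-value layout above concrete, here is a toy sketch, not the real `solana_ledger` implementation: keys are `(slot, shred index)` tuples and a `SlotMeta` stand-in tracks the highest consecutive (`consumed`) index, as in design points 1 and 2.

```rust
use std::collections::BTreeMap;

/// Stand-in for the metadata described in design point 2.
#[derive(Default, Debug)]
struct SlotMeta {
    slot_index: u64,
    num_blocks: u64, // used for chaining to a previous slot
    consumed: u64,   // highest consecutive shred index, per the docs
}

#[derive(Default)]
struct ToyBlockstore {
    // Sorted (slot, shred index) keys mirror the concatenated key of design
    // point 1; values would be the shred/entry payloads.
    shreds: BTreeMap<(u64, u64), Vec<u8>>,
    meta: BTreeMap<u64, SlotMeta>,
}

impl ToyBlockstore {
    fn insert_shred(&mut self, slot: u64, index: u64, data: Vec<u8>) {
        self.shreds.insert((slot, index), data);
        let meta = self.meta.entry(slot).or_default();
        meta.slot_index = slot;
        // Advance `consumed` while the next consecutive index is present.
        while self.shreds.contains_key(&(slot, meta.consumed)) {
            meta.consumed += 1;
        }
    }
}

fn main() {
    let mut store = ToyBlockstore::default();
    store.insert_shred(3, 0, vec![0]);
    store.insert_shred(3, 2, vec![2]); // out of order: consumed stays at 1
    store.insert_shred(3, 1, vec![1]); // gap filled: consumed jumps to 3
    println!("{:?}", store.meta[&3]);
}
```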

View File

@@ -8,32 +8,32 @@ The RepairService is in charge of retrieving missing shreds that failed to be de
 1\) Validators can fail to receive particular shreds due to network failures
-2\) Consider a scenario where blocktree contains the set of slots {1, 3, 5}. Then Blocktree receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blocktree. However, there is no way to chain these slots to any of the existing banks in Blocktree, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.
+2\) Consider a scenario where blockstore contains the set of slots {1, 3, 5}. Then Blockstore receives shreds for some slot 7, where for each of the shreds b, b.parent == 6, so then the parent-child relation 6 -> 7 is stored in blockstore. However, there is no way to chain these slots to any of the existing banks in Blockstore, and thus the `Shred Repair` protocol will not repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node.
 3\) Validators that find themselves behind the cluster by an entire epoch struggle/fail to catch up because they do not have a leader schedule for future epochs. If nodes were to blindly accept repair shreds in these future epochs, this exposes nodes to spam.
 ## Repair Protocols
-The repair protocol makes best attempts to progress the forking structure of Blocktree.
+The repair protocol makes best attempts to progress the forking structure of Blockstore.
 The different protocol strategies to address the above challenges:
-1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blocktree tracks the latest root slot. RepairService will then periodically iterate every fork in blocktree starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair reqeusts per iteration.
+1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair protocol, with the purpose of detecting and filling "holes" in the ledger. Blockstore tracks the latest root slot. RepairService will then periodically iterate every fork in blockstore starting from the root slot, sending repair requests to validators for any missing shreds. It will send at most some `N` repair requests per iteration.
 Note: Validators will only accept shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).
 2. Preemptive Slot Repair \(Addresses Challenge \#2\): The goal of this protocol is to discover the chaining relationship of "orphan" slots that do not currently chain to any known fork.
-* Blocktree will track the set of "orphan" slots in a separate column family.
+* Blockstore will track the set of "orphan" slots in a separate column family.
-* RepairService will periodically make `RequestOrphan` requests for each of the orphans in blocktree.
+* RepairService will periodically make `RequestOrphan` requests for each of the orphans in blockstore.
 `RequestOrphan(orphan)` request - `orphan` is the orphan slot that the requestor wants to know the parents of `RequestOrphan(orphan)` response - The highest shreds for each of the first `N` parents of the requested `orphan`
 On receiving the responses `p`, where `p` is some shred in a parent slot, validators will:
-* Insert an empty `SlotMeta` in blocktree for `p.slot` if it doesn't already exist.
+* Insert an empty `SlotMeta` in blockstore for `p.slot` if it doesn't already exist.
 * If `p.slot` does exist, update the parent of `p` based on `parents`
-Note: that once these empty slots are added to blocktree, the `Shred Repair` protocol should attempt to fill those slots.
+Note: once these empty slots are added to blockstore, the `Shred Repair` protocol should attempt to fill those slots.
 Note: Validators will only accept responses containing shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\).
 3. Repairmen \(Addresses Challenge \#3\): This part of the repair protocol is the primary mechanism by which new nodes joining the cluster catch up after loading a snapshot. This protocol works in a "forward" fashion, so validators can verify every shred that they receive against a known leader schedule.
@@ -45,5 +45,5 @@ The different protocol strategies to address the above challenges:
 Observers of this gossip message with higher epochs \(repairmen\) send shreds to catch the lagging node up with the rest of the cluster. The repairmen are responsible for sending the slots within the epochs that are confirmed by the advertised `root` in gossip. The repairmen divide the responsibility of sending each of the missing slots in these epochs based on a random seed \(simple shred.index iteration by N, seeded with the repairman's node\_pubkey\). Ideally, each repairman in an N node cluster \(N nodes whose epochs are higher than that of the repairee\) sends 1/N of the missing shreds. Both data and coding shreds for missing slots are sent. Repairmen do not send shreds again to the same validator until they see the message in gossip updated, at which point they perform another iteration of this protocol.
-Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blocktree and sent over a channel to RepairService. It is important to note that we know that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blocktree, which holds the latest root.
+Gossip messages are updated every time a validator receives a complete slot within the epoch. Completed slots are detected by blockstore and sent over a channel to RepairService. It is important to note that by the time a slot X is complete, the epoch schedule must exist for the epoch that contains slot X, because WindowService will reject shreds for unconfirmed epochs. When a newly completed slot is detected, we also update the current root if it has changed since the last update. The root is made available to RepairService through Blockstore, which holds the latest root.
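As a rough illustration of the seeded division of labor above, here is a hedged sketch of how each repairman could pick every N-th shred index so that N repairmen jointly cover all missing shreds. The first-byte "seed" derivation is a made-up stand-in for whatever seeding the real protocol performs with the node\_pubkey.

```rust
/// Each repairman derives an offset from its node_pubkey and sends every
/// N-th shred index starting from that offset (hypothetical seeding).
fn my_shred_indexes(node_pubkey: &[u8; 32], n_repairmen: u64, max_index: u64) -> Vec<u64> {
    let offset = node_pubkey[0] as u64 % n_repairmen;
    (offset..max_index).step_by(n_repairmen as usize).collect()
}

fn main() {
    let repairman_a = [1u8; 32];
    let repairman_b = [2u8; 32];
    // With two repairmen, one covers the odd indexes, the other the even ones.
    println!("{:?}", my_shred_indexes(&repairman_a, 2, 10)); // [1, 3, 5, 7, 9]
    println!("{:?}", my_shred_indexes(&repairman_b, 2, 10)); // [0, 2, 4, 6, 8]
}
```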

View File

@@ -84,7 +84,7 @@ let timestamp_slot = floor(current_slot / timestamp_interval);
 ```
 Then the validator needs to gather all Vote WithTimestamp transactions from the
-ledger that reference that slot, using `Blocktree::get_slot_entries()`. As these
+ledger that reference that slot, using `Blockstore::get_slot_entries()`. As these
 transactions could have taken some time to reach and be processed by the leader,
 the validator needs to scan several completed blocks after the timestamp\_slot to
 get a reasonable set of Timestamps. The exact number of slots will need to be
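A small sketch of the slot arithmetic quoted in the hunk header above; in Rust, unsigned integer division already floors, so `floor(current_slot / timestamp_interval)` is just `/` (the numbers here are arbitrary examples):

```rust
fn timestamp_slot(current_slot: u64, timestamp_interval: u64) -> u64 {
    // Unsigned integer division truncates toward zero, which is floor here.
    current_slot / timestamp_interval
}

fn main() {
    assert_eq!(timestamp_slot(1000, 16), 62); // floor(1000 / 16) == 62
    println!("timestamp_slot = {}", timestamp_slot(1000, 16));
}
```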

View File

@@ -28,17 +28,17 @@ slashing proof to punish this bad behavior.
 2) Otherwise, we can simply mark the slot as dead and not playable. A slashing
 proof may or may not be necessary depending on feasibility.
-# Blocktree receiving shreds
+# Blockstore receiving shreds
-When blocktree receives a new shred `s`, there are two cases:
+When blockstore receives a new shred `s`, there are two cases:
 1) `s` is marked as `LAST_SHRED_IN_SLOT`, then check if there exists a shred
-`s'` in blocktree for that slot where `s'.index > s.index` If so, together `s`
+`s'` in blockstore for that slot where `s'.index > s.index`. If so, together `s`
 and `s'` constitute a slashing proof.
-2) Blocktree has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
+2) Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
 with index `i`. If `s.index > i`, then together `s` and `s'` constitute a
-slashing proof. In this case, blocktree will also not insert `s`.
+slashing proof. In this case, blockstore will also not insert `s`.
 3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for
 the same index are a slashable condition. Details for this case are covered
@@ -47,7 +47,7 @@ in the `Leader Duplicate Block Slashing` section.
 # Replaying and validating ticks
-1) Replay stage replays entries from blocktree, keeping track of the number of
+1) Replay stage replays entries from blockstore, keeping track of the number of
 ticks it has seen per slot, and verifying there are `hashes_per_tick` number of
 hashes between ticks. After the tick from this last shred has been played,
 replay stage then checks the total number of ticks.
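The two shred-receipt cases above reduce to an index comparison. Below is a hedged sketch with stand-in types (not the real `solana_ledger` shred structs) of how blockstore could detect such a proof:

```rust
#[derive(Clone, Debug)]
struct Shred {
    index: u64,
    last_in_slot: bool, // models the LAST_SHRED_IN_SLOT flag
}

/// Returns the conflicting pair when `incoming` plus an already-stored shred
/// shows the leader produced shreds past a claimed slot boundary.
fn slashing_proof(stored: &[Shred], incoming: &Shred) -> Option<(Shred, Shred)> {
    if incoming.last_in_slot {
        // Case 1: some stored shred has a higher index than the claimed last.
        if let Some(s) = stored.iter().find(|s| s.index > incoming.index) {
            return Some((incoming.clone(), s.clone()));
        }
    }
    // Case 2: a stored LAST_SHRED_IN_SLOT shred has a lower index than `incoming`.
    stored
        .iter()
        .find(|s| s.last_in_slot && incoming.index > s.index)
        .map(|s| (incoming.clone(), s.clone()))
}

fn main() {
    let stored = vec![Shred { index: 5, last_in_slot: true }];
    let late = Shred { index: 9, last_in_slot: false };
    assert!(slashing_proof(&stored, &late).is_some()); // case 2 fires
    println!("{:?}", slashing_proof(&stored, &late));
}
```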

View File

@@ -41,7 +41,6 @@ schedule.
 ## Notable changes
 * Hoist FetchStage and BroadcastStage out of TPU
-* Blocktree renamed to Blockstore
 * BankForks renamed to Banktree
 * TPU moves to new socket-free crate called solana-tpu.
 * TPU's BankingStage absorbs ReplayStage

View File

@@ -1,16 +1,16 @@
-# Blocktree
+# Blockstore
-After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blocktree_ data structure described here is how a validator copes with those forks until blocks are finalized.
+After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
-The blocktree allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
+The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
 Shreds are moved to a fork-able key space: the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist, or when to persist them.
-Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blocktree.
+Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
-## Functionalities of Blocktree
+## Functionalities of Blockstore
-1. Persistence: the Blocktree lives in the front of the nodes verification
+1. Persistence: the Blockstore lives in the front of the node's verification
 pipeline, right behind network receive and signature verification. If the
@@ -20,26 +20,26 @@ Repair requests for recent shreds are served out of RAM or recent files and out
 2. Repair: repair is the same as window repair above, but able to serve any
-shred that's been received. Blocktree stores shreds with signatures,
+shred that's been received. Blockstore stores shreds with signatures,
 preserving the chain of origination.
-3. Forks: Blocktree supports random access of shreds, so can support a
+3. Forks: Blockstore supports random access of shreds, so can support a
 validator's need to rollback and replay from a Bank checkpoint.
-4. Restart: with proper pruning/culling, the Blocktree can be replayed by
+4. Restart: with proper pruning/culling, the Blockstore can be replayed by
 ordered enumeration of entries from slot 0. The logic of the replay stage
 \(i.e. dealing with forks\) will have to be used for the most recent entries in
-the Blocktree.
+the Blockstore.
-## Blocktree Design
+## Blockstore Design
-1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
+1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
-2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
+2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
 * `slot_index` - The index of this slot
 * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
 * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\).
@@ -53,16 +53,16 @@ Repair requests for recent shreds are served out of RAM or recent files and out
 is\_rooted\(0\) is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\)
 3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
-4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
+4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
-5. Update notifications - The Blocktree notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
+5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
-## Blocktree APIs
+## Blockstore APIs
-The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
+The Blockstore offers a subscription-based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows: 1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
 1. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`; otherwise, no upper limit on the length of the return vector is imposed.
-Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
+Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
 ## Interfacing with Bank
@@ -80,11 +80,11 @@ The bank exposes to replay stage:
 be able to be chained below this vote
-Replay stage uses Blocktree APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
+Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
-## Pruning Blocktree
+## Pruning Blockstore
-Once Blocktree entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blocktree contents that are not on the PoH chain for that vote can be pruned, expunged.
+Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned, expunged.
 Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.
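Since this page repeats the `get_slot_entries` signature, here is a short sketch of its capping contract with stand-in types (the real API lives in `solana_ledger`); the slice stands in for a slot's stored entries:

```rust
#[derive(Clone, Debug)]
struct Entry(u64); // stand-in for the real ledger Entry

/// Mirrors the documented contract: start at `entry_start_index`, cap the
/// result at `max` when `max_entries == Some(max)`, otherwise return all.
fn get_slot_entries(slot: &[Entry], entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry> {
    let tail = &slot[entry_start_index.min(slot.len())..];
    match max_entries {
        Some(max) => tail.iter().take(max as usize).cloned().collect(),
        None => tail.to_vec(),
    }
}

fn main() {
    let entries: Vec<Entry> = (0..10).map(Entry).collect();
    assert_eq!(get_slot_entries(&entries, 4, Some(3)).len(), 3); // capped at max
    assert_eq!(get_slot_entries(&entries, 4, None).len(), 6);    // uncapped
}
```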

View File

@@ -85,7 +85,7 @@ systemstat = "0.1.5"
 name = "banking_stage"
 [[bench]]
-name = "blocktree"
+name = "blockstore"
 [[bench]]
 name = "gen_keys"

View File

@@ -12,9 +12,9 @@ use solana_core::cluster_info::Node;
 use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
 use solana_core::packet::to_packets_chunked;
 use solana_core::poh_recorder::WorkingBankEntry;
-use solana_ledger::blocktree_processor::process_entries;
+use solana_ledger::blockstore_processor::process_entries;
 use solana_ledger::entry::{next_hash, Entry};
-use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
+use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
 use solana_perf::test_tx::test_tx;
 use solana_runtime::bank::Bank;
 use solana_sdk::genesis_config::GenesisConfig;
@@ -57,11 +57,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
 let ledger_path = get_tmp_ledger_path!();
 let my_pubkey = Pubkey::new_rand();
 {
-let blocktree = Arc::new(
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+let blockstore = Arc::new(
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
 );
 let (exit, poh_recorder, poh_service, _signal_receiver) =
-create_test_recorder(&bank, &blocktree, None);
+create_test_recorder(&bank, &blockstore, None);
 let tx = test_tx();
 let len = 4096;
@@ -87,7 +87,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
 exit.store(true, Ordering::Relaxed);
 poh_service.join().unwrap();
 }
-let _unused = Blocktree::destroy(&ledger_path);
+let _unused = Blockstore::destroy(&ledger_path);
 }
 fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
@@ -184,11 +184,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
 let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
 let ledger_path = get_tmp_ledger_path!();
 {
-let blocktree = Arc::new(
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
+let blockstore = Arc::new(
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
 );
 let (exit, poh_recorder, poh_service, signal_receiver) =
-create_test_recorder(&bank, &blocktree, None);
+create_test_recorder(&bank, &blockstore, None);
 let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
 let cluster_info = Arc::new(RwLock::new(cluster_info));
 let _banking_stage = BankingStage::new(
@@ -244,7 +244,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
 exit.store(true, Ordering::Relaxed);
 poh_service.join().unwrap();
 }
-let _unused = Blocktree::destroy(&ledger_path);
+let _unused = Blockstore::destroy(&ledger_path);
 }
 #[bench]

View File

@@ -6,7 +6,7 @@ extern crate test;
 use rand::Rng;
 use solana_ledger::{
-blocktree::{entries_to_test_shreds, Blocktree},
+blockstore::{entries_to_test_shreds, Blockstore},
 entry::{create_ticks, Entry},
 get_tmp_ledger_path,
 };
@@ -16,19 +16,19 @@ use test::Bencher;
 // Given some shreds and a ledger at ledger_path, benchmark writing the shreds to the ledger
 fn bench_write_shreds(bench: &mut Bencher, entries: Vec<Entry>, ledger_path: &Path) {
-let blocktree =
-Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
+let blockstore =
+Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
 bench.iter(move || {
 let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
-blocktree.insert_shreds(shreds, None, false).unwrap();
+blockstore.insert_shreds(shreds, None, false).unwrap();
 });
-Blocktree::destroy(ledger_path).expect("Expected successful database destruction");
+Blockstore::destroy(ledger_path).expect("Expected successful database destruction");
 }
 // Insert some shreds into the ledger in preparation for read benchmarks
 fn setup_read_bench(
-blocktree: &mut Blocktree,
+blockstore: &mut Blockstore,
 num_small_shreds: u64,
 num_large_shreds: u64,
 slot: Slot,
@@ -42,7 +42,7 @@ fn setup_read_bench(
 // Convert the entries to shreds, write the shreds to the ledger
 let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0);
-blocktree
+blockstore
 .insert_shreds(shreds, None, false)
 .expect("Expected successful insertion of shreds into ledger");
 }
@@ -71,15 +71,15 @@ fn bench_write_big(bench: &mut Bencher) {
 #[ignore]
 fn bench_read_sequential(bench: &mut Bencher) {
 let ledger_path = get_tmp_ledger_path!();
-let mut blocktree =
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+let mut blockstore =
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
 // Insert some big and small shreds into the ledger
 let num_small_shreds = 32 * 1024;
 let num_large_shreds = 32 * 1024;
 let total_shreds = num_small_shreds + num_large_shreds;
 let slot = 0;
-setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot);
+setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
 let num_reads = total_shreds / 15;
 let mut rng = rand::thread_rng();
@@ -87,26 +87,26 @@ fn bench_read_sequential(bench: &mut Bencher) {
 // Generate random starting point in the range [0, total_shreds - 1], read num_reads shreds sequentially
 let start_index = rng.gen_range(0, num_small_shreds + num_large_shreds);
 for i in start_index..start_index + num_reads {
-let _ = blocktree.get_data_shred(slot, i as u64 % total_shreds);
+let _ = blockstore.get_data_shred(slot, i as u64 % total_shreds);
 }
 });
-Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
 #[bench]
 #[ignore]
 fn bench_read_random(bench: &mut Bencher) {
 let ledger_path = get_tmp_ledger_path!();
-let mut blocktree =
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+let mut blockstore =
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
 // Insert some big and small shreds into the ledger
 let num_small_shreds = 32 * 1024;
 let num_large_shreds = 32 * 1024;
 let total_shreds = num_small_shreds + num_large_shreds;
 let slot = 0;
-setup_read_bench(&mut blocktree, num_small_shreds, num_large_shreds, slot);
+setup_read_bench(&mut blockstore, num_small_shreds, num_large_shreds, slot);
 let num_reads = total_shreds / 15;
@@ -118,39 +118,39 @@ fn bench_read_random(bench: &mut Bencher) {
 .collect();
 bench.iter(move || {
 for i in indexes.iter() {
-let _ = blocktree.get_data_shred(slot, *i as u64);
+let _ = blockstore.get_data_shred(slot, *i as u64);
 }
 });
-Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
 #[bench]
 #[ignore]
 fn bench_insert_data_shred_small(bench: &mut Bencher) {
 let ledger_path = get_tmp_ledger_path!();
-let blocktree =
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+let blockstore =
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
 let num_entries = 32 * 1024;
 let entries = create_ticks(num_entries, 0, Hash::default());
 bench.iter(move || {
 let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
-blocktree.insert_shreds(shreds, None, false).unwrap();
+blockstore.insert_shreds(shreds, None, false).unwrap();
 });
-Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }
 #[bench]
 #[ignore]
 fn bench_insert_data_shred_big(bench: &mut Bencher) {
 let ledger_path = get_tmp_ledger_path!();
-let blocktree =
-Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
+let blockstore =
+Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
 let num_entries = 32 * 1024;
 let entries = create_ticks(num_entries, 0, Hash::default());
 bench.iter(move || {
 let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
-blocktree.insert_shreds(shreds, None, false).unwrap();
+blockstore.insert_shreds(shreds, None, false).unwrap();
 });
-Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
+Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
 }

View File

@ -19,7 +19,7 @@ use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng; use rand_chacha::ChaChaRng;
use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest, thin_client::ThinClient}; use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest, thin_client::ThinClient};
use solana_ledger::{ use solana_ledger::{
blocktree::Blocktree, leader_schedule_cache::LeaderScheduleCache, shred::Shred, blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
}; };
use solana_net_utils::bind_in_range; use solana_net_utils::bind_in_range;
use solana_perf::packet::Packets; use solana_perf::packet::Packets;
@ -222,13 +222,13 @@ impl Archiver {
// Note for now, this ledger will not contain any of the existing entries // Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append on newly received // in the ledger located at ledger_path, and will only append on newly received
// entries after being passed to window_service // entries after being passed to window_service
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
); );
let gossip_service = GossipService::new( let gossip_service = GossipService::new(
&cluster_info, &cluster_info,
Some(blocktree.clone()), Some(blockstore.clone()),
None, None,
node.sockets.gossip, node.sockets.gossip,
&exit, &exit,
@ -294,7 +294,7 @@ impl Archiver {
let window_service = match Self::setup( let window_service = match Self::setup(
&mut meta, &mut meta,
cluster_info.clone(), cluster_info.clone(),
&blocktree, &blockstore,
&exit, &exit,
&node_info, &node_info,
&storage_keypair, &storage_keypair,
@ -320,7 +320,7 @@ impl Archiver {
// run archiver // run archiver
Self::run( Self::run(
&mut meta, &mut meta,
&blocktree, &blockstore,
cluster_info, cluster_info,
&keypair, &keypair,
&storage_keypair, &storage_keypair,
@ -344,14 +344,14 @@ impl Archiver {
fn run( fn run(
meta: &mut ArchiverMeta, meta: &mut ArchiverMeta,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
archiver_keypair: &Arc<Keypair>, archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>, storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
) { ) {
// encrypt segment // encrypt segment
Self::encrypt_ledger(meta, blocktree).expect("ledger encrypt not successful"); Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful");
let enc_file_path = meta.ledger_data_file_encrypted.clone(); let enc_file_path = meta.ledger_data_file_encrypted.clone();
// do replicate // do replicate
loop { loop {
@ -443,7 +443,7 @@ impl Archiver {
fn setup( fn setup(
meta: &mut ArchiverMeta, meta: &mut ArchiverMeta,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
node_info: &ContactInfo, node_info: &ContactInfo,
storage_keypair: &Arc<Keypair>, storage_keypair: &Arc<Keypair>,
@ -498,7 +498,7 @@ impl Archiver {
); );
let window_service = WindowService::new( let window_service = WindowService::new(
blocktree.clone(), blockstore.clone(),
cluster_info.clone(), cluster_info.clone(),
verified_receiver, verified_receiver,
retransmit_sender, retransmit_sender,
@ -512,7 +512,7 @@ impl Archiver {
Self::wait_for_segment_download( Self::wait_for_segment_download(
slot, slot,
slots_per_segment, slots_per_segment,
&blocktree, &blockstore,
&exit, &exit,
&node_info, &node_info,
cluster_info, cluster_info,
@ -523,7 +523,7 @@ impl Archiver {
fn wait_for_segment_download( fn wait_for_segment_download(
start_slot: Slot, start_slot: Slot,
slots_per_segment: u64, slots_per_segment: u64,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
node_info: &ContactInfo, node_info: &ContactInfo,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
@ -534,7 +534,7 @@ impl Archiver {
); );
let mut current_slot = start_slot; let mut current_slot = start_slot;
'outer: loop { 'outer: loop {
while blocktree.is_full(current_slot) { while blockstore.is_full(current_slot) {
current_slot += 1; current_slot += 1;
info!("current slot: {}", current_slot); info!("current slot: {}", current_slot);
if current_slot >= start_slot + slots_per_segment { if current_slot >= start_slot + slots_per_segment {
@ -559,7 +559,7 @@ impl Archiver {
} }
} }
fn encrypt_ledger(meta: &mut ArchiverMeta, blocktree: &Arc<Blocktree>) -> Result<()> { fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME); meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME);
{ {
@ -567,7 +567,7 @@ impl Archiver {
ivec.copy_from_slice(&meta.signature.as_ref()); ivec.copy_from_slice(&meta.signature.as_ref());
let num_encrypted_bytes = chacha_cbc_encrypt_ledger( let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
blocktree, blockstore,
meta.slot, meta.slot,
meta.slots_per_segment, meta.slots_per_segment,
&meta.ledger_data_file_encrypted, &meta.ledger_data_file_encrypted,
@ -844,15 +844,15 @@ impl Archiver {
} }
} }
/// Ask an archiver to populate a given blocktree with its segment. /// Ask an archiver to populate a given blockstore with its segment.
/// Return the slot at the start of the archiver's segment /// Return the slot at the start of the archiver's segment
/// ///
/// It is recommended to use a temporary blocktree for this since the download will not verify /// It is recommended to use a temporary blockstore for this since the download will not verify
/// the received shreds, which might impact the chaining of shreds across slots /// the received shreds, which might impact the chaining of shreds across slots
pub fn download_from_archiver( pub fn download_from_archiver(
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
archiver_info: &ContactInfo, archiver_info: &ContactInfo,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
slots_per_segment: u64, slots_per_segment: u64,
) -> Result<u64> { ) -> Result<u64> {
// Create a client which downloads from the archiver and see that it // Create a client which downloads from the archiver and see that it
@ -884,7 +884,7 @@ impl Archiver {
for _ in 0..120 { for _ in 0..120 {
// Strategy used by archivers // Strategy used by archivers
let repairs = RepairService::generate_repairs_in_range( let repairs = RepairService::generate_repairs_in_range(
blocktree, blockstore,
repair_service::MAX_REPAIR_LENGTH, repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range, &repair_slot_range,
); );
@ -930,10 +930,10 @@ impl Archiver {
.into_iter() .into_iter()
.filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok()) .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
.collect(); .collect();
blocktree.insert_shreds(shreds, None, false)?; blockstore.insert_shreds(shreds, None, false)?;
} }
// check if all the slots in the segment are complete // check if all the slots in the segment are complete
if Self::segment_complete(start_slot, slots_per_segment, blocktree) { if Self::segment_complete(start_slot, slots_per_segment, blockstore) {
break; break;
} }
sleep(Duration::from_millis(500)); sleep(Duration::from_millis(500));
@ -942,7 +942,7 @@ impl Archiver {
t_receiver.join().unwrap(); t_receiver.join().unwrap();
// check if all the slots in the segment are complete // check if all the slots in the segment are complete
if !Self::segment_complete(start_slot, slots_per_segment, blocktree) { if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
return Err( return Err(
io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(), io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(),
); );
@ -953,10 +953,10 @@ impl Archiver {
fn segment_complete( fn segment_complete(
start_slot: Slot, start_slot: Slot,
slots_per_segment: u64, slots_per_segment: u64,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
) -> bool { ) -> bool {
for slot in start_slot..(start_slot + slots_per_segment) { for slot in start_slot..(start_slot + slots_per_segment) {
if !blocktree.is_full(slot) { if !blockstore.is_full(slot) {
return false; return false;
} }
} }
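For illustration, a minimal sketch of how a caller might follow the doc comment's recommendation above and pull a segment into a throwaway blockstore. The wiring is hypothetical: `cluster_info` and `archiver_info` are assumed to come from the caller's gossip state, imports of ClusterInfo, ContactInfo, and Archiver are assumed to be in scope, and error handling is reduced to expect().

use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use std::sync::{Arc, RwLock};

// Hypothetical caller: download one segment into a temporary blockstore,
// read what is needed, then destroy the ledger. The shreds arrive
// unverified, so this store stays isolated from the node's real ledger.
fn fetch_segment(
    cluster_info: &Arc<RwLock<ClusterInfo>>, // assumed: from gossip
    archiver_info: &ContactInfo,             // assumed: a discovered archiver
    slots_per_segment: u64,
) -> u64 {
    let ledger_path = get_tmp_ledger_path!();
    let start_slot = {
        let blockstore =
            Arc::new(Blockstore::open(&ledger_path).expect("open temp blockstore"));
        Archiver::download_from_archiver(
            cluster_info,
            archiver_info,
            &blockstore,
            slots_per_segment,
        )
        .expect("segment download")
    }; // the blockstore handle is dropped here
    let _ = Blockstore::destroy(&ledger_path);
    start_slot
}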


@ -10,8 +10,8 @@ use crate::{
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError}; use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools; use itertools::Itertools;
use solana_ledger::{ use solana_ledger::{
blocktree::Blocktree, blockstore::Blockstore,
blocktree_processor::{send_transaction_status_batch, TransactionStatusSender}, blockstore_processor::{send_transaction_status_batch, TransactionStatusSender},
entry::hash_transactions, entry::hash_transactions,
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
}; };
@ -979,7 +979,7 @@ impl BankingStage {
pub fn create_test_recorder( pub fn create_test_recorder(
bank: &Arc<Bank>, bank: &Arc<Bank>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
poh_config: Option<PohConfig>, poh_config: Option<PohConfig>,
) -> ( ) -> (
Arc<AtomicBool>, Arc<AtomicBool>,
@ -996,7 +996,7 @@ pub fn create_test_recorder(
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
blocktree, blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&poh_config, &poh_config,
); );
@ -1022,7 +1022,7 @@ mod tests {
use itertools::Itertools; use itertools::Itertools;
use solana_client::rpc_request::RpcEncodedTransaction; use solana_client::rpc_request::RpcEncodedTransaction;
use solana_ledger::{ use solana_ledger::{
blocktree::entries_to_test_shreds, blockstore::entries_to_test_shreds,
entry::{next_entry, Entry, EntrySlice}, entry::{next_entry, Entry, EntrySlice},
get_tmp_ledger_path, get_tmp_ledger_path,
}; };
@ -1043,11 +1043,12 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded(); let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
); );
let (exit, poh_recorder, poh_service, _entry_receiver) = let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree, None); create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info)); let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new( let banking_stage = BankingStage::new(
@ -1063,7 +1064,7 @@ mod tests {
banking_stage.join().unwrap(); banking_stage.join().unwrap();
poh_service.join().unwrap(); poh_service.join().unwrap();
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1080,13 +1081,14 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded(); let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
); );
let mut poh_config = PohConfig::default(); let mut poh_config = PohConfig::default();
poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks); poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
let (exit, poh_recorder, poh_service, entry_receiver) = let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config)); create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info)); let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new( let banking_stage = BankingStage::new(
@ -1114,7 +1116,7 @@ mod tests {
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash()); assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap(); banking_stage.join().unwrap();
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> { pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
@ -1141,14 +1143,15 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded(); let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
); );
let mut poh_config = PohConfig::default(); let mut poh_config = PohConfig::default();
// limit the tick count to avoid clearing the working_bank in PohRecorder, which would surface as PohRecorderError(MaxHeightReached) in BankingStage // limit the tick count to avoid clearing the working_bank in PohRecorder, which would surface as PohRecorderError(MaxHeightReached) in BankingStage
poh_config.target_tick_count = Some(bank.max_tick_height() - 1); poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
let (exit, poh_recorder, poh_service, entry_receiver) = let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config)); create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info)); let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new( let banking_stage = BankingStage::new(
@ -1234,7 +1237,7 @@ mod tests {
drop(entry_receiver); drop(entry_receiver);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1280,15 +1283,15 @@ mod tests {
let entry_receiver = { let entry_receiver = {
// start a banking_stage to eat verified receiver // start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path) Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"), .expect("Expected to be able to open database ledger"),
); );
let mut poh_config = PohConfig::default(); let mut poh_config = PohConfig::default();
// limit the tick count to avoid clearing the working_bank in PohRecorder, which would surface as PohRecorderError(MaxHeightReached) in BankingStage // limit the tick count to avoid clearing the working_bank in PohRecorder, which would surface as PohRecorderError(MaxHeightReached) in BankingStage
poh_config.target_tick_count = Some(bank.max_tick_height() - 1); poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
let (exit, poh_recorder, poh_service, entry_receiver) = let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config)); create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info); ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info)); let cluster_info = Arc::new(RwLock::new(cluster_info));
@ -1331,7 +1334,7 @@ mod tests {
// the account balance below zero before the credit is added. // the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 2); assert_eq!(bank.get_balance(&alice.pubkey()), 2);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1349,8 +1352,8 @@ mod tests {
}; };
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new( let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
@ -1358,7 +1361,7 @@ mod tests {
None, None,
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1435,7 +1438,7 @@ mod tests {
// Should receive nothing from PohRecorder b/c record failed // Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err()); assert!(entry_receiver.try_recv().is_err());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1685,8 +1688,8 @@ mod tests {
}; };
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new( let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
@ -1694,7 +1697,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&pubkey, &pubkey,
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1751,7 +1754,7 @@ mod tests {
assert_eq!(bank.get_balance(&pubkey), 1); assert_eq!(bank.get_balance(&pubkey), 1);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1778,8 +1781,8 @@ mod tests {
}; };
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new( let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
@ -1787,7 +1790,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&pubkey, &pubkey,
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1806,7 +1809,7 @@ mod tests {
assert!(result.is_ok()); assert!(result.is_ok());
assert_eq!(unprocessed.len(), 1); assert_eq!(unprocessed.len(), 1);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1866,8 +1869,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new( let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
@ -1875,7 +1878,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::new_rand(), &Pubkey::new_rand(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1894,7 +1897,7 @@ mod tests {
assert_eq!(retryable_txs, expected); assert_eq!(retryable_txs, expected);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1933,9 +1936,9 @@ mod tests {
}; };
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let (poh_recorder, _entry_receiver) = PohRecorder::new( let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
@ -1943,7 +1946,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&pubkey, &pubkey,
&blocktree, &blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1952,13 +1955,13 @@ mod tests {
poh_recorder.lock().unwrap().set_working_bank(working_bank); poh_recorder.lock().unwrap().set_working_bank(working_bank);
let shreds = entries_to_test_shreds(entries.clone(), bank.slot(), 0, true, 0); let shreds = entries_to_test_shreds(entries.clone(), bank.slot(), 0, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[bank.slot()]).unwrap(); blockstore.set_roots(&[bank.slot()]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded(); let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new( let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver, transaction_status_receiver,
blocktree.clone(), blockstore.clone(),
&Arc::new(AtomicBool::new(false)), &Arc::new(AtomicBool::new(false)),
); );
@ -1972,7 +1975,7 @@ mod tests {
transaction_status_service.join().unwrap(); transaction_status_service.join().unwrap();
let confirmed_block = blocktree.get_confirmed_block(bank.slot(), None).unwrap(); let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3); assert_eq!(confirmed_block.transactions.len(), 3);
for (transaction, result) in confirmed_block.transactions.into_iter() { for (transaction, result) in confirmed_block.transactions.into_iter() {
@ -1993,6 +1996,6 @@ mod tests {
} }
} }
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
} }
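A pattern worth noting in the tests above: each test opens the blockstore inside an inner scope and only calls Blockstore::destroy after that scope closes. A hedged distillation of the pattern, assuming (as the scoping suggests) that the backing database must have no live handles before its files can be deleted:

let ledger_path = get_tmp_ledger_path!();
{
    let blockstore = Arc::new(
        Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
    );
    // ... hand clones of the Arc to the services under test, run assertions ...
} // every Arc<Blockstore> must be dropped by the end of this scope
Blockstore::destroy(&ledger_path).unwrap(); // now safe: no open handles remain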


@ -8,7 +8,7 @@ use crate::blockstream::MockBlockstream as Blockstream;
#[cfg(not(test))] #[cfg(not(test))]
use crate::blockstream::SocketBlockstream as Blockstream; use crate::blockstream::SocketBlockstream as Blockstream;
use crate::result::{Error, Result}; use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use std::path::Path; use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
@ -25,7 +25,7 @@ impl BlockstreamService {
#[allow(clippy::new_ret_no_self)] #[allow(clippy::new_ret_no_self)]
pub fn new( pub fn new(
slot_full_receiver: Receiver<(u64, Pubkey)>, slot_full_receiver: Receiver<(u64, Pubkey)>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
unix_socket: &Path, unix_socket: &Path,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
) -> Self { ) -> Self {
@ -38,7 +38,7 @@ impl BlockstreamService {
break; break;
} }
if let Err(e) = if let Err(e) =
Self::process_entries(&slot_full_receiver, &blocktree, &mut blockstream) Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream)
{ {
match e { match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break, Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@ -52,18 +52,18 @@ impl BlockstreamService {
} }
fn process_entries( fn process_entries(
slot_full_receiver: &Receiver<(u64, Pubkey)>, slot_full_receiver: &Receiver<(u64, Pubkey)>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
blockstream: &mut Blockstream, blockstream: &mut Blockstream,
) -> Result<()> { ) -> Result<()> {
let timeout = Duration::new(1, 0); let timeout = Duration::new(1, 0);
let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?; let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
let entries = blocktree.get_slot_entries(slot, 0, None).unwrap(); let entries = blockstore.get_slot_entries(slot, 0, None).unwrap();
let blocktree_meta = blocktree.meta(slot).unwrap().unwrap(); let blockstore_meta = blockstore.meta(slot).unwrap().unwrap();
let _parent_slot = if slot == 0 { let _parent_slot = if slot == 0 {
None None
} else { } else {
Some(blocktree_meta.parent_slot) Some(blockstore_meta.parent_slot)
}; };
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64; let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
let mut tick_height = ticks_per_slot * slot; let mut tick_height = ticks_per_slot * slot;
@ -113,14 +113,14 @@ mod test {
let ticks_per_slot = 5; let ticks_per_slot = 5;
let leader_pubkey = Pubkey::new_rand(); let leader_pubkey = Pubkey::new_rand();
// Set up genesis config and blocktree // Set up genesis config and blockstore
let GenesisConfigInfo { let GenesisConfigInfo {
mut genesis_config, .. mut genesis_config, ..
} = create_genesis_config(1000); } = create_genesis_config(1000);
genesis_config.ticks_per_slot = ticks_per_slot; genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
// Set up blockstream // Set up blockstream
let mut blockstream = Blockstream::new(&PathBuf::from("test_stream")); let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
@ -143,7 +143,7 @@ mod test {
let expected_entries = entries.clone(); let expected_entries = entries.clone();
let expected_tick_heights = [6, 7, 8, 9, 9, 10]; let expected_tick_heights = [6, 7, 8, 9, 9, 10];
blocktree blockstore
.write_entries( .write_entries(
1, 1,
0, 0,
@ -160,7 +160,7 @@ mod test {
slot_full_sender.send((1, leader_pubkey)).unwrap(); slot_full_sender.send((1, leader_pubkey)).unwrap();
BlockstreamService::process_entries( BlockstreamService::process_entries(
&slot_full_receiver, &slot_full_receiver,
&Arc::new(blocktree), &Arc::new(blockstore),
&mut blockstream, &mut blockstream,
) )
.unwrap(); .unwrap();
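The expected_tick_heights assertion above is plain arithmetic: with ticks_per_slot = 5 and slot = 1, the height starts at 5 * 1 = 5 and advances only on tick entries, so the transaction entry repeats the preceding height. A self-contained sketch of that rule (the entry shapes are assumed from the test):

// Height starts at ticks_per_slot * slot and bumps on ticks only.
fn tick_heights(slot: u64, ticks_per_slot: u64, is_tick: &[bool]) -> Vec<u64> {
    let mut height = ticks_per_slot * slot;
    is_tick
        .iter()
        .map(|&tick| {
            if tick {
                height += 1;
            }
            height
        })
        .collect()
}

fn main() {
    // Four ticks, one transaction entry, then a final tick => [6, 7, 8, 9, 9, 10]
    assert_eq!(
        tick_heights(1, 5, &[true, true, true, true, false, true]),
        vec![6, 7, 8, 9, 9, 10]
    );
}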


@ -5,7 +5,7 @@ use self::standard_broadcast_run::StandardBroadcastRun;
use crate::cluster_info::{ClusterInfo, ClusterInfoError}; use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::poh_recorder::WorkingBankEntry; use crate::poh_recorder::WorkingBankEntry;
use crate::result::{Error, Result}; use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_ledger::shred::Shred; use solana_ledger::shred::Shred;
use solana_ledger::staking_utils; use solana_ledger::staking_utils;
use solana_metrics::{inc_new_counter_error, inc_new_counter_info}; use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
@ -44,7 +44,7 @@ impl BroadcastStageType {
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>, receiver: Receiver<WorkingBankEntry>,
exit_sender: &Arc<AtomicBool>, exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
shred_version: u16, shred_version: u16,
) -> BroadcastStage { ) -> BroadcastStage {
let keypair = cluster_info.read().unwrap().keypair.clone(); let keypair = cluster_info.read().unwrap().keypair.clone();
@ -54,7 +54,7 @@ impl BroadcastStageType {
cluster_info, cluster_info,
receiver, receiver,
exit_sender, exit_sender,
blocktree, blockstore,
StandardBroadcastRun::new(keypair, shred_version), StandardBroadcastRun::new(keypair, shred_version),
), ),
@ -63,7 +63,7 @@ impl BroadcastStageType {
cluster_info, cluster_info,
receiver, receiver,
exit_sender, exit_sender,
blocktree, blockstore,
FailEntryVerificationBroadcastRun::new(keypair, shred_version), FailEntryVerificationBroadcastRun::new(keypair, shred_version),
), ),
@ -72,7 +72,7 @@ impl BroadcastStageType {
cluster_info, cluster_info,
receiver, receiver,
exit_sender, exit_sender,
blocktree, blockstore,
BroadcastFakeShredsRun::new(keypair, 0, shred_version), BroadcastFakeShredsRun::new(keypair, 0, shred_version),
), ),
} }
@ -83,10 +83,10 @@ type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
trait BroadcastRun { trait BroadcastRun {
fn run( fn run(
&mut self, &mut self,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>, receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>, socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>, blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()>; ) -> Result<()>;
fn transmit( fn transmit(
&self, &self,
@ -97,7 +97,7 @@ trait BroadcastRun {
fn record( fn record(
&self, &self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>, receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
) -> Result<()>; ) -> Result<()>;
} }
@ -126,14 +126,15 @@ pub struct BroadcastStage {
impl BroadcastStage { impl BroadcastStage {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn run( fn run(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>, receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>, socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>, blockstore_sender: &Sender<Arc<Vec<Shred>>>,
mut broadcast_stage_run: impl BroadcastRun, mut broadcast_stage_run: impl BroadcastRun,
) -> BroadcastStageReturnType { ) -> BroadcastStageReturnType {
loop { loop {
let res = broadcast_stage_run.run(blocktree, receiver, socket_sender, blocktree_sender); let res =
broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender);
let res = Self::handle_error(res); let res = Self::handle_error(res);
if let Some(res) = res { if let Some(res) = res {
return res; return res;
@ -180,19 +181,25 @@ impl BroadcastStage {
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>, receiver: Receiver<WorkingBankEntry>,
exit_sender: &Arc<AtomicBool>, exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone, broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
) -> Self { ) -> Self {
let btree = blocktree.clone(); let btree = blockstore.clone();
let exit = exit_sender.clone(); let exit = exit_sender.clone();
let (socket_sender, socket_receiver) = channel(); let (socket_sender, socket_receiver) = channel();
let (blocktree_sender, blocktree_receiver) = channel(); let (blockstore_sender, blockstore_receiver) = channel();
let bs_run = broadcast_stage_run.clone(); let bs_run = broadcast_stage_run.clone();
let thread_hdl = Builder::new() let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string()) .name("solana-broadcaster".to_string())
.spawn(move || { .spawn(move || {
let _finalizer = Finalizer::new(exit); let _finalizer = Finalizer::new(exit);
Self::run(&btree, &receiver, &socket_sender, &blocktree_sender, bs_run) Self::run(
&btree,
&receiver,
&socket_sender,
&blockstore_sender,
bs_run,
)
}) })
.unwrap(); .unwrap();
let mut thread_hdls = vec![thread_hdl]; let mut thread_hdls = vec![thread_hdl];
@ -213,15 +220,15 @@ impl BroadcastStage {
.unwrap(); .unwrap();
thread_hdls.push(t); thread_hdls.push(t);
} }
let blocktree_receiver = Arc::new(Mutex::new(blocktree_receiver)); let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
for _ in 0..NUM_INSERT_THREADS { for _ in 0..NUM_INSERT_THREADS {
let blocktree_receiver = blocktree_receiver.clone(); let blockstore_receiver = blockstore_receiver.clone();
let bs_record = broadcast_stage_run.clone(); let bs_record = broadcast_stage_run.clone();
let btree = blocktree.clone(); let btree = blockstore.clone();
let t = Builder::new() let t = Builder::new()
.name("solana-broadcaster-record".to_string()) .name("solana-broadcaster-record".to_string())
.spawn(move || loop { .spawn(move || loop {
let res = bs_record.record(&blocktree_receiver, &btree); let res = bs_record.record(&blockstore_receiver, &btree);
let res = Self::handle_error(res); let res = Self::handle_error(res);
if let Some(res) = res { if let Some(res) = res {
return res; return res;
@ -248,7 +255,7 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node}; use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::entry::create_ticks; use solana_ledger::entry::create_ticks;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
@ -261,7 +268,7 @@ mod test {
use std::time::Duration; use std::time::Duration;
struct MockBroadcastStage { struct MockBroadcastStage {
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
broadcast_service: BroadcastStage, broadcast_service: BroadcastStage,
bank: Arc<Bank>, bank: Arc<Bank>,
} }
@ -272,7 +279,7 @@ mod test {
entry_receiver: Receiver<WorkingBankEntry>, entry_receiver: Receiver<WorkingBankEntry>,
) -> MockBroadcastStage { ) -> MockBroadcastStage {
// Make the database ledger // Make the database ledger
let blocktree = Arc::new(Blocktree::open(ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap());
// Make the leader node and scheduler // Make the leader node and scheduler
let leader_info = Node::new_localhost_with_pubkey(leader_pubkey); let leader_info = Node::new_localhost_with_pubkey(leader_pubkey);
@ -298,12 +305,12 @@ mod test {
cluster_info, cluster_info,
entry_receiver, entry_receiver,
&exit_sender, &exit_sender,
&blocktree, &blockstore,
StandardBroadcastRun::new(leader_keypair, 0), StandardBroadcastRun::new(leader_keypair, 0),
); );
MockBroadcastStage { MockBroadcastStage {
blocktree, blockstore,
broadcast_service, broadcast_service,
bank, bank,
} }
@ -350,8 +357,8 @@ mod test {
ticks_per_slot, ticks_per_slot,
); );
let blocktree = broadcast_service.blocktree; let blockstore = broadcast_service.blockstore;
let (entries, _, _) = blocktree let (entries, _, _) = blockstore
.get_slot_entries_with_shred_info(slot, 0) .get_slot_entries_with_shred_info(slot, 0)
.expect("Expect entries to be present"); .expect("Expect entries to be present");
assert_eq!(entries.len(), max_tick_height as usize); assert_eq!(entries.len(), max_tick_height as usize);
@ -363,6 +370,6 @@ mod test {
.expect("Expect successful join of broadcast service"); .expect("Expect successful join of broadcast service");
} }
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
} }
} }
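The run/transmit/record split above pushes each shred batch through two channels: socket_sender feeds the transmit threads and blockstore_sender feeds the record (insert) threads. A minimal, self-contained sketch of that fan-out using plain std channels — an illustration of the pattern, not Solana code:

use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (socket_tx, socket_rx) = channel::<Vec<u8>>();
    let (store_tx, store_rx) = channel::<Vec<u8>>();
    // One thread plays the transmit role, another the record role.
    let transmit = thread::spawn(move || {
        for batch in socket_rx {
            println!("transmit {} bytes", batch.len());
        }
    });
    let record = thread::spawn(move || {
        for batch in store_rx {
            println!("record {} bytes", batch.len());
        }
    });
    // The producer (the `run` role) hands every batch to both consumers.
    for i in 1..=3_usize {
        let batch = vec![0u8; 10 * i];
        socket_tx.send(batch.clone()).unwrap();
        store_tx.send(batch).unwrap();
    }
    drop(socket_tx);
    drop(store_tx);
    transmit.join().unwrap();
    record.join().unwrap();
}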


@ -26,17 +26,17 @@ impl BroadcastFakeShredsRun {
impl BroadcastRun for BroadcastFakeShredsRun { impl BroadcastRun for BroadcastFakeShredsRun {
fn run( fn run(
&mut self, &mut self,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>, receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>, socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>, blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> { ) -> Result<()> {
// 1) Pull entries from banking stage // 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_entries(receiver)?; let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
let bank = receive_results.bank.clone(); let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height; let last_tick_height = receive_results.last_tick_height;
let next_shred_index = blocktree let next_shred_index = blockstore
.meta(bank.slot()) .meta(bank.slot())
.expect("Database error") .expect("Database error")
.map(|meta| meta.consumed) .map(|meta| meta.consumed)
@ -83,7 +83,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
} }
let data_shreds = Arc::new(data_shreds); let data_shreds = Arc::new(data_shreds);
blocktree_sender.send(data_shreds.clone())?; blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step // 3) Start broadcast step
// Some indicates fake shreds // Some indicates fake shreds
@ -121,10 +121,10 @@ impl BroadcastRun for BroadcastFakeShredsRun {
fn record( fn record(
&self, &self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>, receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
) -> Result<()> { ) -> Result<()> {
for data_shreds in receiver.lock().unwrap().iter() { for data_shreds in receiver.lock().unwrap().iter() {
blocktree.insert_shreds(data_shreds.to_vec(), None, true)?; blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
} }
Ok(()) Ok(())
} }


@ -21,10 +21,10 @@ impl FailEntryVerificationBroadcastRun {
impl BroadcastRun for FailEntryVerificationBroadcastRun { impl BroadcastRun for FailEntryVerificationBroadcastRun {
fn run( fn run(
&mut self, &mut self,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>, receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>, socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>, blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> { ) -> Result<()> {
// 1) Pull entries from banking stage // 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?; let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@ -38,7 +38,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
last_entry.hash = Hash::default(); last_entry.hash = Hash::default();
} }
let next_shred_index = blocktree let next_shred_index = blockstore
.meta(bank.slot()) .meta(bank.slot())
.expect("Database error") .expect("Database error")
.map(|meta| meta.consumed) .map(|meta| meta.consumed)
@ -61,7 +61,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
); );
let data_shreds = Arc::new(data_shreds); let data_shreds = Arc::new(data_shreds);
blocktree_sender.send(data_shreds.clone())?; blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step // 3) Start broadcast step
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot()); let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch); let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
@ -90,12 +90,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
fn record( fn record(
&self, &self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>, receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
) -> Result<()> { ) -> Result<()> {
let all_shreds = receiver.lock().unwrap().recv()?; let all_shreds = receiver.lock().unwrap().recv()?;
blocktree blockstore
.insert_shreds(all_shreds.to_vec(), None, true) .insert_shreds(all_shreds.to_vec(), None, true)
.expect("Failed to insert shreds in blocktree"); .expect("Failed to insert shreds in blockstore");
Ok(()) Ok(())
} }
} }


@ -83,13 +83,13 @@ impl StandardBroadcastRun {
last_unfinished_slot_shred last_unfinished_slot_shred
} }
fn init_shredder(&self, blocktree: &Blocktree, reference_tick: u8) -> (Shredder, u32) { fn init_shredder(&self, blockstore: &Blockstore, reference_tick: u8) -> (Shredder, u32) {
let (slot, parent_slot) = self.current_slot_and_parent.unwrap(); let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
let next_shred_index = self let next_shred_index = self
.unfinished_slot .unfinished_slot
.map(|s| s.next_shred_index) .map(|s| s.next_shred_index)
.unwrap_or_else(|| { .unwrap_or_else(|| {
blocktree blockstore
.meta(slot) .meta(slot)
.expect("Database error") .expect("Database error")
.map(|meta| meta.consumed) .map(|meta| meta.consumed)
@ -132,27 +132,27 @@ impl StandardBroadcastRun {
&mut self, &mut self,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket, sock: &UdpSocket,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
receive_results: ReceiveResults, receive_results: ReceiveResults,
) -> Result<()> { ) -> Result<()> {
let (bsend, brecv) = channel(); let (bsend, brecv) = channel();
let (ssend, srecv) = channel(); let (ssend, srecv) = channel();
self.process_receive_results(&blocktree, &ssend, &bsend, receive_results)?; self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
let srecv = Arc::new(Mutex::new(srecv)); let srecv = Arc::new(Mutex::new(srecv));
let brecv = Arc::new(Mutex::new(brecv)); let brecv = Arc::new(Mutex::new(brecv));
//data //data
let _ = self.transmit(&srecv, cluster_info, sock); let _ = self.transmit(&srecv, cluster_info, sock);
//coding //coding
let _ = self.transmit(&srecv, cluster_info, sock); let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blocktree); let _ = self.record(&brecv, blockstore);
Ok(()) Ok(())
} }
fn process_receive_results( fn process_receive_results(
&mut self, &mut self,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
socket_sender: &Sender<TransmitShreds>, socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>, blockstore_sender: &Sender<Arc<Vec<Shred>>>,
receive_results: ReceiveResults, receive_results: ReceiveResults,
) -> Result<()> { ) -> Result<()> {
let mut receive_elapsed = receive_results.time_elapsed; let mut receive_elapsed = receive_results.time_elapsed;
@ -181,7 +181,7 @@ impl StandardBroadcastRun {
// 2) Convert entries to shreds and coding shreds // 2) Convert entries to shreds and coding shreds
let (shredder, next_shred_index) = self.init_shredder( let (shredder, next_shred_index) = self.init_shredder(
blocktree, blockstore,
(bank.tick_height() % bank.ticks_per_slot()) as u8, (bank.tick_height() % bank.ticks_per_slot()) as u8,
); );
let mut data_shreds = self.entries_to_data_shreds( let mut data_shreds = self.entries_to_data_shreds(
@ -190,13 +190,13 @@ impl StandardBroadcastRun {
&receive_results.entries, &receive_results.entries,
last_tick_height == bank.max_tick_height(), last_tick_height == bank.max_tick_height(),
); );
//Insert the first shred so blocktree stores that the leader started this block //Insert the first shred so blockstore stores that the leader started this block
//This must be done before the blocks are sent out over the wire. //This must be done before the blocks are sent out over the wire.
if !data_shreds.is_empty() && data_shreds[0].index() == 0 { if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
let first = vec![data_shreds[0].clone()]; let first = vec![data_shreds[0].clone()];
blocktree blockstore
.insert_shreds(first, None, true) .insert_shreds(first, None, true)
.expect("Failed to insert shreds in blocktree"); .expect("Failed to insert shreds in blockstore");
} }
let last_data_shred = data_shreds.len(); let last_data_shred = data_shreds.len();
if let Some(last_shred) = last_unfinished_slot_shred { if let Some(last_shred) = last_unfinished_slot_shred {
@ -209,7 +209,7 @@ impl StandardBroadcastRun {
let stakes = stakes.map(Arc::new); let stakes = stakes.map(Arc::new);
let data_shreds = Arc::new(data_shreds); let data_shreds = Arc::new(data_shreds);
socket_sender.send((stakes.clone(), data_shreds.clone()))?; socket_sender.send((stakes.clone(), data_shreds.clone()))?;
blocktree_sender.send(data_shreds.clone())?; blockstore_sender.send(data_shreds.clone())?;
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]); let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
let coding_shreds = Arc::new(coding_shreds); let coding_shreds = Arc::new(coding_shreds);
socket_sender.send((stakes, coding_shreds))?; socket_sender.send((stakes, coding_shreds))?;
@ -227,8 +227,8 @@ impl StandardBroadcastRun {
Ok(()) Ok(())
} }
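The "Insert the first shred" comments in process_receive_results above pin down an ordering invariant: the slot's first shred must be durable in the blockstore before any shred is broadcast, so the store records that this leader started the block even if the process dies mid-transmission. A hedged sketch of the invariant in isolation, where `send` stands in for the socket path and the surrounding module's imports (Shred, Blockstore, Arc) are assumed:

fn broadcast_in_order(
    data_shreds: &[Shred],
    blockstore: &Arc<Blockstore>,
    send: impl Fn(&Shred),
) {
    // Persist the first shred synchronously before touching the wire.
    if let Some(first) = data_shreds.first() {
        if first.index() == 0 {
            blockstore
                .insert_shreds(vec![first.clone()], None, true)
                .expect("Failed to insert shreds in blockstore");
        }
    }
    // Only now may shreds leave the machine.
    for shred in data_shreds {
        send(shred);
    }
}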
fn insert(&self, blocktree: &Arc<Blocktree>, shreds: Arc<Vec<Shred>>) -> Result<()> { fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
// Insert shreds into blocktree // Insert shreds into blockstore
let insert_shreds_start = Instant::now(); let insert_shreds_start = Instant::now();
//The first shred is inserted synchronously //The first shred is inserted synchronously
let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 { let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
@ -236,9 +236,9 @@ impl StandardBroadcastRun {
} else { } else {
shreds.to_vec() shreds.to_vec()
}; };
blocktree blockstore
.insert_shreds(data_shreds, None, true) .insert_shreds(data_shreds, None, true)
.expect("Failed to insert shreds in blocktree"); .expect("Failed to insert shreds in blockstore");
let insert_shreds_elapsed = insert_shreds_start.elapsed(); let insert_shreds_elapsed = insert_shreds_start.elapsed();
self.update_broadcast_stats(BroadcastStats { self.update_broadcast_stats(BroadcastStats {
insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed), insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
@ -317,13 +317,18 @@ impl StandardBroadcastRun {
impl BroadcastRun for StandardBroadcastRun { impl BroadcastRun for StandardBroadcastRun {
fn run( fn run(
&mut self, &mut self,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>, receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>, socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>, blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> { ) -> Result<()> {
let receive_results = broadcast_utils::recv_slot_entries(receiver)?; let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
self.process_receive_results(blocktree, socket_sender, blocktree_sender, receive_results) self.process_receive_results(
blockstore,
socket_sender,
blockstore_sender,
receive_results,
)
} }
fn transmit( fn transmit(
&self, &self,
@ -337,10 +342,10 @@ impl BroadcastRun for StandardBroadcastRun {
fn record( fn record(
&self, &self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>, receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
) -> Result<()> { ) -> Result<()> {
let shreds = receiver.lock().unwrap().recv()?; let shreds = receiver.lock().unwrap().recv()?;
self.insert(blocktree, shreds) self.insert(blockstore, shreds)
} }
} }
@ -350,7 +355,7 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node}; use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::create_genesis_config; use crate::genesis_utils::create_genesis_config;
use solana_ledger::{ use solana_ledger::{
blocktree::Blocktree, entry::create_ticks, get_tmp_ledger_path, blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,
shred::max_ticks_per_n_shreds, shred::max_ticks_per_n_shreds,
}; };
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
@ -365,7 +370,7 @@ mod test {
fn setup( fn setup(
num_shreds_per_slot: Slot, num_shreds_per_slot: Slot,
) -> ( ) -> (
Arc<Blocktree>, Arc<Blockstore>,
GenesisConfig, GenesisConfig,
Arc<RwLock<ClusterInfo>>, Arc<RwLock<ClusterInfo>>,
Arc<Bank>, Arc<Bank>,
@ -374,8 +379,8 @@ mod test {
) { ) {
// Setup // Setup
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
); );
let leader_keypair = Arc::new(Keypair::new()); let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey(); let leader_pubkey = leader_keypair.pubkey();
@ -388,7 +393,7 @@ mod test {
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1; genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config)); let bank0 = Arc::new(Bank::new(&genesis_config));
( (
blocktree, blockstore,
genesis_config, genesis_config,
cluster_info, cluster_info,
bank0, bank0,
@ -433,7 +438,7 @@ mod test {
fn test_slot_interrupt() { fn test_slot_interrupt() {
// Setup // Setup
let num_shreds_per_slot = 2; let num_shreds_per_slot = 2;
let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) = let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
setup(num_shreds_per_slot); setup(num_shreds_per_slot);
// Insert 1 less than the number of ticks needed to finish the slot // Insert 1 less than the number of ticks needed to finish the slot
@ -448,14 +453,14 @@ mod test {
// Step 1: Make an incomplete transmission for slot 0 // Step 1: Make an incomplete transmission for slot 0
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0); let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0);
standard_broadcast_run standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results) .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap(); .unwrap();
let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot); assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot);
assert_eq!(unfinished_slot.slot, 0); assert_eq!(unfinished_slot.slot, 0);
assert_eq!(unfinished_slot.parent, 0); assert_eq!(unfinished_slot.parent, 0);
// Make sure the slot is not complete // Make sure the slot is not complete
assert!(!blocktree.is_full(0)); assert!(!blockstore.is_full(0));
// Modify the stats, should reset later // Modify the stats, should reset later
standard_broadcast_run standard_broadcast_run
.stats .stats
@ -463,10 +468,10 @@ mod test {
.unwrap() .unwrap()
.receive_elapsed = 10; .receive_elapsed = 10;
// Try to fetch ticks from blocktree, nothing should break // Try to fetch ticks from blockstore, nothing should break
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0); assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!( assert_eq!(
blocktree blockstore
.get_slot_entries(0, num_shreds_per_slot, None) .get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(), .unwrap(),
vec![], vec![],
@ -487,7 +492,7 @@ mod test {
last_tick_height: (ticks1.len() - 1) as u64, last_tick_height: (ticks1.len() - 1) as u64,
}; };
standard_broadcast_run standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results) .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap(); .unwrap();
let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
@ -503,10 +508,10 @@ mod test {
0 0
); );
// Try to fetch the incomplete ticks from blocktree, should succeed // Try to fetch the incomplete ticks from blockstore, should succeed
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0); assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!( assert_eq!(
blocktree blockstore
.get_slot_entries(0, num_shreds_per_slot, None) .get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(), .unwrap(),
vec![], vec![],
@ -517,7 +522,7 @@ mod test {
fn test_slot_finish() { fn test_slot_finish() {
// Setup // Setup
let num_shreds_per_slot = 2; let num_shreds_per_slot = 2;
let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) = let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
setup(num_shreds_per_slot); setup(num_shreds_per_slot);
// Insert complete slot of ticks needed to finish the slot // Insert complete slot of ticks needed to finish the slot
@ -531,7 +536,7 @@ mod test {
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0); let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0);
standard_broadcast_run standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results) .test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap(); .unwrap();
assert!(standard_broadcast_run.unfinished_slot.is_none()) assert!(standard_broadcast_run.unfinished_slot.is_none())
} }


@ -1,4 +1,4 @@
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot; use solana_sdk::clock::Slot;
use std::fs::File; use std::fs::File;
use std::io; use std::io;
@ -12,7 +12,7 @@ pub const CHACHA_BLOCK_SIZE: usize = 64;
pub const CHACHA_KEY_SIZE: usize = 32; pub const CHACHA_KEY_SIZE: usize = 32;
pub fn chacha_cbc_encrypt_ledger( pub fn chacha_cbc_encrypt_ledger(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
start_slot: Slot, start_slot: Slot,
slots_per_segment: u64, slots_per_segment: u64,
out_path: &Path, out_path: &Path,
@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger(
let mut current_slot = start_slot; let mut current_slot = start_slot;
let mut start_index = 0; let mut start_index = 0;
loop { loop {
match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
Ok((last_index, mut size)) => { Ok((last_index, mut size)) => {
debug!( debug!(
"chacha: encrypting slice: {} num_shreds: {} data_len: {}", "chacha: encrypting slice: {} num_shreds: {} data_len: {}",
@ -75,7 +75,7 @@ pub fn chacha_cbc_encrypt_ledger(
mod tests { mod tests {
use crate::chacha::chacha_cbc_encrypt_ledger; use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::gen_keys::GenKeys; use crate::gen_keys::GenKeys;
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry; use solana_ledger::entry::Entry;
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use solana_sdk::hash::{hash, Hash, Hasher}; use solana_sdk::hash::{hash, Hash, Hasher};
@ -131,7 +131,7 @@ mod tests {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 16; let ticks_per_slot = 16;
let slots_per_segment = 32; let slots_per_segment = 32;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let out_path = tmp_file_path("test_encrypt_ledger"); let out_path = tmp_file_path("test_encrypt_ledger");
let seed = [2u8; 32]; let seed = [2u8; 32];
@ -139,7 +139,7 @@ mod tests {
let keypair = rnd.gen_keypair(); let keypair = rnd.gen_keypair();
let entries = make_tiny_deterministic_test_entries(slots_per_segment); let entries = make_tiny_deterministic_test_entries(slots_per_segment);
blocktree blockstore
.write_entries( .write_entries(
0, 0,
0, 0,
@ -157,8 +157,14 @@ mod tests {
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234 "abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234" abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
); );
chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, &out_path, &mut key) chacha_cbc_encrypt_ledger(
.unwrap(); &blockstore,
0,
slots_per_segment as u64,
&out_path,
&mut key,
)
.unwrap();
let mut out_file = File::open(&out_path).unwrap(); let mut out_file = File::open(&out_path).unwrap();
let mut buf = vec![]; let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap(); let size = out_file.read_to_end(&mut buf).unwrap();
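As a reminder of what the CBC in chacha_cbc_encrypt_ledger implies, here is a generic cipher-block-chaining sketch — an illustration only, not the crate's actual cipher: each CHACHA_BLOCK_SIZE-byte block is XORed with the previous ciphertext block before encryption, which is why the shreds are streamed through strictly in order.

// Generic CBC over 64-byte blocks; `enc` is any block-cipher permutation.
fn cbc_encrypt(blocks: &mut [[u8; 64]], iv: [u8; 64], enc: impl Fn(&[u8; 64]) -> [u8; 64]) {
    let mut prev = iv;
    for block in blocks.iter_mut() {
        for i in 0..64 {
            block[i] ^= prev[i]; // chain with the previous ciphertext block
        }
        let ciphertext = enc(block);
        *block = ciphertext;
        prev = ciphertext;
    }
}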


@ -1,7 +1,7 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU // Module used by validators to approve storage mining proofs in parallel using the GPU
use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE}; use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs; use solana_perf::perf_libs;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use std::io; use std::io;
@ -13,7 +13,7 @@ use std::sync::Arc;
// Then sample each block with sha256 at the offsets provided by the samples argument // Then sample each block with sha256 at the offsets provided by the samples argument
// and return the vec of sha states // and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys( pub fn chacha_cbc_encrypt_file_many_keys(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
segment: u64, segment: u64,
slots_per_segment: u64, slots_per_segment: u64,
ivecs: &mut [u8], ivecs: &mut [u8],
@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
(api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32); (api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32);
} }
loop { loop {
match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) { match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
Ok((last_index, mut size)) => { Ok((last_index, mut size)) => {
debug!( debug!(
"chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}", "chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
@ -134,9 +134,9 @@ mod tests {
let entries = create_ticks(slots_per_segment, 0, Hash::default()); let entries = create_ticks(slots_per_segment, 0, Hash::default());
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 16; let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blocktree blockstore
.write_entries( .write_entries(
0, 0,
0, 0,
@ -160,7 +160,7 @@ mod tests {
let mut cpu_iv = ivecs.clone(); let mut cpu_iv = ivecs.clone();
chacha_cbc_encrypt_ledger( chacha_cbc_encrypt_ledger(
&blocktree, &blockstore,
0, 0,
slots_per_segment as u64, slots_per_segment as u64,
out_path, out_path,
@ -171,7 +171,7 @@ mod tests {
let ref_hash = sample_file(&out_path, &samples).unwrap(); let ref_hash = sample_file(&out_path, &samples).unwrap();
let hashes = chacha_cbc_encrypt_file_many_keys( let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree, &blockstore,
0, 0,
slots_per_segment as u64, slots_per_segment as u64,
&mut ivecs, &mut ivecs,
@ -196,8 +196,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 90; let ticks_per_slot = 90;
let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default()); let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blocktree blockstore
.write_entries( .write_entries(
0, 0,
0, 0,
@ -224,7 +224,7 @@ mod tests {
ivec[0] = i; ivec[0] = i;
ivecs.extend(ivec.clone().iter()); ivecs.extend(ivec.clone().iter());
chacha_cbc_encrypt_ledger( chacha_cbc_encrypt_ledger(
&blocktree.clone(), &blockstore.clone(),
0, 0,
DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_SLOTS_PER_SEGMENT,
out_path, out_path,
@ -242,7 +242,7 @@ mod tests {
} }
let hashes = chacha_cbc_encrypt_file_many_keys( let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree, &blockstore,
0, 0,
DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_SLOTS_PER_SEGMENT,
&mut ivecs, &mut ivecs,
@ -267,9 +267,9 @@ mod tests {
let mut keys = hex!("abc123"); let mut keys = hex!("abc123");
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let samples = [0]; let samples = [0];
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys( assert!(chacha_cbc_encrypt_file_many_keys(
&blocktree, &blockstore,
0, 0,
DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_SLOTS_PER_SEGMENT,
&mut keys, &mut keys,


@ -30,7 +30,7 @@ use bincode::{serialize, serialized_size};
use core::cmp; use core::cmp;
use itertools::Itertools; use itertools::Itertools;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree, staking_utils}; use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils};
use solana_measure::thread_mem_usage; use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error}; use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{ use solana_net_utils::{
@ -1113,12 +1113,12 @@ impl ClusterInfo {
} }
fn get_data_shred_as_packet( fn get_data_shred_as_packet(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
slot: Slot, slot: Slot,
shred_index: u64, shred_index: u64,
dest: &SocketAddr, dest: &SocketAddr,
) -> Result<Option<Packet>> { ) -> Result<Option<Packet>> {
let data = blocktree.get_data_shred(slot, shred_index)?; let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| { Ok(data.map(|data| {
let mut packet = Packet::default(); let mut packet = Packet::default();
packet.meta.size = data.len(); packet.meta.size = data.len();
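// A sketch of how the callers below unpack the double wrapping: the outer
// Result signals a blockstore read failure, the inner Option a missing shred:
match Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr) {
    Ok(Some(_packet)) => { /* found: queue the packet for the requester */ }
    Ok(None) => { /* slot/index not stored locally: nothing to serve */ }
    Err(_) => { /* read error: skip this request */ }
}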
@ -1132,14 +1132,14 @@ impl ClusterInfo {
recycler: &PacketsRecycler, recycler: &PacketsRecycler,
from: &ContactInfo, from: &ContactInfo,
from_addr: &SocketAddr, from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>, blockstore: Option<&Arc<Blockstore>>,
me: &ContactInfo, me: &ContactInfo,
slot: Slot, slot: Slot,
shred_index: u64, shred_index: u64,
) -> Option<Packets> { ) -> Option<Packets> {
if let Some(blocktree) = blocktree { if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots // Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blocktree, slot, shred_index, from_addr); let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
if let Ok(Some(packet)) = packet { if let Ok(Some(packet)) = packet {
inc_new_counter_debug!("cluster_info-window-request-ledger", 1); inc_new_counter_debug!("cluster_info-window-request-ledger", 1);
@ -1166,17 +1166,17 @@ impl ClusterInfo {
fn run_highest_window_request( fn run_highest_window_request(
recycler: &PacketsRecycler, recycler: &PacketsRecycler,
from_addr: &SocketAddr, from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>, blockstore: Option<&Arc<Blockstore>>,
slot: Slot, slot: Slot,
highest_index: u64, highest_index: u64,
) -> Option<Packets> { ) -> Option<Packets> {
let blocktree = blocktree?; let blockstore = blockstore?;
// Try to find the requested index in one of the slots // Try to find the requested index in one of the slots
let meta = blocktree.meta(slot).ok()??; let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index { if meta.received > highest_index {
// meta.received must be at least 1 by this point // meta.received must be at least 1 by this point
let packet = let packet =
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr) Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??; .ok()??;
return Some(Packets::new_with_recycler_data( return Some(Packets::new_with_recycler_data(
recycler, recycler,
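// Worked example of the check above: with shreds 0..=9 stored for a slot,
// `meta.received` is 10, so any request with `highest_index < 10` is answered
// with the newest shred (index `meta.received - 1 == 9`), while a request
// with `highest_index >= 10` falls through to None; the tests below assert
// exactly this behavior.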
@ -1190,19 +1190,19 @@ impl ClusterInfo {
fn run_orphan( fn run_orphan(
recycler: &PacketsRecycler, recycler: &PacketsRecycler,
from_addr: &SocketAddr, from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>, blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot, mut slot: Slot,
max_responses: usize, max_responses: usize,
) -> Option<Packets> { ) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan"); let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blocktree) = blocktree { if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot // Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blocktree.meta(slot) { while let Ok(Some(meta)) = blockstore.meta(slot) {
if meta.received == 0 { if meta.received == 0 {
break; break;
} }
let packet = let packet =
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr); Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet { if let Ok(Some(packet)) = packet {
res.packets.push(packet); res.packets.push(packet);
} }
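// A sketch of the walk above: each pass emits the newest shred of `slot`
// (index `meta.received - 1`) and then follows the parent link; the field
// name `parent_slot` is an assumption, since the hunk cuts off before it:
while let Ok(Some(meta)) = blockstore.meta(slot) {
    if meta.received == 0 {
        break;
    }
    // push shred `meta.received - 1` for `slot`, then climb toward the root
    slot = meta.parent_slot;
}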
@ -1222,7 +1222,7 @@ impl ClusterInfo {
fn handle_packets( fn handle_packets(
me: &Arc<RwLock<Self>>, me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler, recycler: &PacketsRecycler,
blocktree: Option<&Arc<Blocktree>>, blockstore: Option<&Arc<Blockstore>>,
stakes: &HashMap<Pubkey, u64>, stakes: &HashMap<Pubkey, u64>,
packets: Packets, packets: Packets,
response_sender: &PacketSender, response_sender: &PacketSender,
@ -1330,7 +1330,8 @@ impl ClusterInfo {
); );
} }
_ => { _ => {
let rsp = Self::handle_repair(me, recycler, &from_addr, blocktree, request); let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request);
if let Some(rsp) = rsp { if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp); let _ignore_disconnect = response_sender.send(rsp);
} }
@ -1475,7 +1476,7 @@ impl ClusterInfo {
me: &Arc<RwLock<Self>>, me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler, recycler: &PacketsRecycler,
from_addr: &SocketAddr, from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>, blockstore: Option<&Arc<Blockstore>>,
request: Protocol, request: Protocol,
) -> Option<Packets> { ) -> Option<Packets> {
let now = Instant::now(); let now = Instant::now();
@ -1511,7 +1512,7 @@ impl ClusterInfo {
recycler, recycler,
from, from,
&from_addr, &from_addr,
blocktree, blockstore,
&my_info, &my_info,
*slot, *slot,
*shred_index, *shred_index,
@ -1526,7 +1527,7 @@ impl ClusterInfo {
Self::run_highest_window_request( Self::run_highest_window_request(
recycler, recycler,
&from_addr, &from_addr,
blocktree, blockstore,
*slot, *slot,
*highest_index, *highest_index,
), ),
@ -1539,7 +1540,7 @@ impl ClusterInfo {
Self::run_orphan( Self::run_orphan(
recycler, recycler,
&from_addr, &from_addr,
blocktree, blockstore,
*slot, *slot,
MAX_ORPHAN_REPAIR_RESPONSES, MAX_ORPHAN_REPAIR_RESPONSES,
), ),
@ -1559,7 +1560,7 @@ impl ClusterInfo {
fn run_listen( fn run_listen(
obj: &Arc<RwLock<Self>>, obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler, recycler: &PacketsRecycler,
blocktree: Option<&Arc<Blocktree>>, blockstore: Option<&Arc<Blockstore>>,
bank_forks: Option<&Arc<RwLock<BankForks>>>, bank_forks: Option<&Arc<RwLock<BankForks>>>,
requests_receiver: &PacketReceiver, requests_receiver: &PacketReceiver,
response_sender: &PacketSender, response_sender: &PacketSender,
@ -1574,12 +1575,12 @@ impl ClusterInfo {
None => HashMap::new(), None => HashMap::new(),
}; };
Self::handle_packets(obj, &recycler, blocktree, &stakes, reqs, response_sender); Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender);
Ok(()) Ok(())
} }
pub fn listen( pub fn listen(
me: Arc<RwLock<Self>>, me: Arc<RwLock<Self>>,
blocktree: Option<Arc<Blocktree>>, blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>, bank_forks: Option<Arc<RwLock<BankForks>>>,
requests_receiver: PacketReceiver, requests_receiver: PacketReceiver,
response_sender: PacketSender, response_sender: PacketSender,
@ -1593,7 +1594,7 @@ impl ClusterInfo {
let e = Self::run_listen( let e = Self::run_listen(
&me, &me,
&recycler, &recycler,
blocktree.as_ref(), blockstore.as_ref(),
bank_forks.as_ref(), bank_forks.as_ref(),
&requests_receiver, &requests_receiver,
&response_sender, &response_sender,
@ -1916,9 +1917,9 @@ mod tests {
use crate::repair_service::RepairType; use crate::repair_service::RepairType;
use crate::result::Error; use crate::result::Error;
use rayon::prelude::*; use rayon::prelude::*;
use solana_ledger::blocktree::make_many_slot_entries; use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_ledger::blocktree_processor::fill_blocktree_slot_with_ticks; use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks;
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use solana_ledger::shred::{ use solana_ledger::shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader, max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
@ -2062,7 +2063,7 @@ mod tests {
solana_logger::setup(); solana_logger::setup();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let me = ContactInfo::new( let me = ContactInfo::new(
&Pubkey::new_rand(), &Pubkey::new_rand(),
socketaddr!("127.0.0.1:1234"), socketaddr!("127.0.0.1:1234"),
@ -2080,7 +2081,7 @@ mod tests {
&recycler, &recycler,
&me, &me,
&socketaddr_any!(), &socketaddr_any!(),
Some(&blocktree), Some(&blockstore),
&me, &me,
0, 0,
0, 0,
@ -2097,7 +2098,7 @@ mod tests {
CodingShredHeader::default(), CodingShredHeader::default(),
); );
blocktree blockstore
.insert_shreds(vec![shred_info], None, false) .insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write"); .expect("Expect successful ledger write");
@ -2105,7 +2106,7 @@ mod tests {
&recycler, &recycler,
&me, &me,
&socketaddr_any!(), &socketaddr_any!(),
Some(&blocktree), Some(&blockstore),
&me, &me,
2, 2,
1, 1,
@ -2121,7 +2122,7 @@ mod tests {
assert_eq!(rv[0].slot(), 2); assert_eq!(rv[0].slot(), 2);
} }
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
} }
/// test window requests respond with the right shred, and do not overrun /// test window requests respond with the right shred, and do not overrun
@ -2131,18 +2132,18 @@ mod tests {
solana_logger::setup(); solana_logger::setup();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_highest_window_request( let rv = ClusterInfo::run_highest_window_request(
&recycler, &recycler,
&socketaddr_any!(), &socketaddr_any!(),
Some(&blocktree), Some(&blockstore),
0, 0,
0, 0,
); );
assert!(rv.is_none()); assert!(rv.is_none());
let _ = fill_blocktree_slot_with_ticks( let _ = fill_blockstore_slot_with_ticks(
&blocktree, &blockstore,
max_ticks_per_n_shreds(1) + 1, max_ticks_per_n_shreds(1) + 1,
2, 2,
1, 1,
@ -2152,7 +2153,7 @@ mod tests {
let rv = ClusterInfo::run_highest_window_request( let rv = ClusterInfo::run_highest_window_request(
&recycler, &recycler,
&socketaddr_any!(), &socketaddr_any!(),
Some(&blocktree), Some(&blockstore),
2, 2,
1, 1,
); );
@ -2163,21 +2164,21 @@ mod tests {
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok()) .filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect(); .collect();
assert!(!rv.is_empty()); assert!(!rv.is_empty());
let index = blocktree.meta(2).unwrap().unwrap().received - 1; let index = blockstore.meta(2).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32); assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2); assert_eq!(rv[0].slot(), 2);
let rv = ClusterInfo::run_highest_window_request( let rv = ClusterInfo::run_highest_window_request(
&recycler, &recycler,
&socketaddr_any!(), &socketaddr_any!(),
Some(&blocktree), Some(&blockstore),
2, 2,
index + 1, index + 1,
); );
assert!(rv.is_none()); assert!(rv.is_none());
} }
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
} }
#[test] #[test]
@ -2186,25 +2187,27 @@ mod tests {
let recycler = PacketsRecycler::default(); let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 2, 0); let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
assert!(rv.is_none()); assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece // Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5); let (shreds, _) = make_many_slot_entries(1, 3, 5);
blocktree blockstore
.insert_shreds(shreds, None, false) .insert_shreds(shreds, None, false)
.expect("Expect successful ledger write"); .expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request // We don't have slot 4, so we don't know how to service this request
let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 4, 5); let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
assert!(rv.is_none()); assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively // For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request // for this request
let rv: Vec<_> = let rv: Vec<_> =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 3, 5) ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets") .expect("run_orphan packets")
.packets .packets
.iter() .iter()
@ -2213,9 +2216,9 @@ mod tests {
let expected: Vec<_> = (1..=3) let expected: Vec<_> = (1..=3)
.rev() .rev()
.map(|slot| { .map(|slot| {
let index = blocktree.meta(slot).unwrap().unwrap().received - 1; let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ClusterInfo::get_data_shred_as_packet( ClusterInfo::get_data_shred_as_packet(
&blocktree, &blockstore,
slot, slot,
index, index,
&socketaddr_any!(), &socketaddr_any!(),
@ -2227,7 +2230,7 @@ mod tests {
assert_eq!(rv, expected) assert_eq!(rv, expected)
} }
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction"); Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
} }
fn assert_in_range(x: u16, range: (u16, u16)) { fn assert_in_range(x: u16, range: (u16, u16)) {


@ -5,7 +5,7 @@ use byteorder::{ByteOrder, LittleEndian};
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use rand::SeedableRng; use rand::SeedableRng;
use rand_chacha::ChaChaRng; use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_ledger::rooted_slot_iterator::RootedSlotIterator; use solana_ledger::rooted_slot_iterator::RootedSlotIterator;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey}; use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{ use std::{
@ -89,13 +89,13 @@ pub struct ClusterInfoRepairListener {
impl ClusterInfoRepairListener { impl ClusterInfoRepairListener {
pub fn new( pub fn new(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
epoch_schedule: EpochSchedule, epoch_schedule: EpochSchedule,
) -> Self { ) -> Self {
let exit = exit.clone(); let exit = exit.clone();
let blocktree = blocktree.clone(); let blockstore = blockstore.clone();
let thread = Builder::new() let thread = Builder::new()
.name("solana-cluster_info_repair_listener".to_string()) .name("solana-cluster_info_repair_listener".to_string())
.spawn(move || { .spawn(move || {
@ -105,7 +105,7 @@ impl ClusterInfoRepairListener {
// 2) The latest root the peer gossiped // 2) The latest root the peer gossiped
let mut peer_infos: HashMap<Pubkey, RepaireeInfo> = HashMap::new(); let mut peer_infos: HashMap<Pubkey, RepaireeInfo> = HashMap::new();
let _ = Self::recv_loop( let _ = Self::recv_loop(
&blocktree, &blockstore,
&mut peer_infos, &mut peer_infos,
&exit, &exit,
&cluster_info, &cluster_info,
@ -119,7 +119,7 @@ impl ClusterInfoRepairListener {
} }
fn recv_loop( fn recv_loop(
blocktree: &Blocktree, blockstore: &Blockstore,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>, peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -134,7 +134,7 @@ impl ClusterInfoRepairListener {
return Ok(()); return Ok(());
} }
let lowest_slot = blocktree.lowest_slot(); let lowest_slot = blockstore.lowest_slot();
let peers = cluster_info.read().unwrap().gossip_peers(); let peers = cluster_info.read().unwrap().gossip_peers();
let mut peers_needing_repairs: HashMap<Pubkey, EpochSlots> = HashMap::new(); let mut peers_needing_repairs: HashMap<Pubkey, EpochSlots> = HashMap::new();
@ -156,7 +156,7 @@ impl ClusterInfoRepairListener {
// After updating all the peers, send out repairs to those that need it // After updating all the peers, send out repairs to those that need it
let _ = Self::serve_repairs( let _ = Self::serve_repairs(
&my_pubkey, &my_pubkey,
blocktree, blockstore,
peer_infos, peer_infos,
&peers_needing_repairs, &peers_needing_repairs,
&socket, &socket,
@ -219,7 +219,7 @@ impl ClusterInfoRepairListener {
fn serve_repairs( fn serve_repairs(
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
blocktree: &Blocktree, blockstore: &Blockstore,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>, peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
repairees: &HashMap<Pubkey, EpochSlots>, repairees: &HashMap<Pubkey, EpochSlots>,
socket: &UdpSocket, socket: &UdpSocket,
@ -258,7 +258,7 @@ impl ClusterInfoRepairListener {
my_pubkey, my_pubkey,
repairee_pubkey, repairee_pubkey,
my_root, my_root,
blocktree, blockstore,
&repairee_epoch_slots, &repairee_epoch_slots,
&eligible_repairmen, &eligible_repairmen,
socket, socket,
@ -286,7 +286,7 @@ impl ClusterInfoRepairListener {
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
repairee_pubkey: &Pubkey, repairee_pubkey: &Pubkey,
my_root: Slot, my_root: Slot,
blocktree: &Blocktree, blockstore: &Blockstore,
repairee_epoch_slots: &EpochSlots, repairee_epoch_slots: &EpochSlots,
eligible_repairmen: &[&Pubkey], eligible_repairmen: &[&Pubkey],
socket: &UdpSocket, socket: &UdpSocket,
@ -295,7 +295,7 @@ impl ClusterInfoRepairListener {
epoch_schedule: &EpochSchedule, epoch_schedule: &EpochSchedule,
last_repaired_slot_and_ts: (u64, u64), last_repaired_slot_and_ts: (u64, u64),
) -> Result<Option<Slot>> { ) -> Result<Option<Slot>> {
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree); let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blockstore);
if slot_iter.is_err() { if slot_iter.is_err() {
info!( info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}", "Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
@ -366,17 +366,17 @@ impl ClusterInfoRepairListener {
// a database iterator over the slots because by the time this node is // a database iterator over the slots because by the time this node is
// sending the shreds in this slot for repair, we expect these slots // sending the shreds in this slot for repair, we expect these slots
// to be full. // to be full.
if let Some(shred_data) = blocktree if let Some(shred_data) = blockstore
.get_data_shred(slot, shred_index as u64) .get_data_shred(slot, shred_index as u64)
.expect("Failed to read data shred from blocktree") .expect("Failed to read data shred from blockstore")
{ {
socket.send_to(&shred_data[..], repairee_addr)?; socket.send_to(&shred_data[..], repairee_addr)?;
total_data_shreds_sent += 1; total_data_shreds_sent += 1;
} }
if let Some(coding_bytes) = blocktree if let Some(coding_bytes) = blockstore
.get_coding_shred(slot, shred_index as u64) .get_coding_shred(slot, shred_index as u64)
.expect("Failed to read coding shred from blocktree") .expect("Failed to read coding shred from blockstore")
{ {
socket.send_to(&coding_bytes[..], repairee_addr)?; socket.send_to(&coding_bytes[..], repairee_addr)?;
total_coding_shreds_sent += 1; total_coding_shreds_sent += 1;
@ -550,7 +550,7 @@ mod tests {
use crate::packet::Packets; use crate::packet::Packets;
use crate::streamer; use crate::streamer;
use crate::streamer::PacketReceiver; use crate::streamer::PacketReceiver;
use solana_ledger::blocktree::make_many_slot_entries; use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use solana_perf::recycler::Recycler; use solana_perf::recycler::Recycler;
use std::collections::BTreeSet; use std::collections::BTreeSet;
@ -699,16 +699,16 @@ mod tests {
#[test] #[test]
fn test_serve_same_repairs_to_repairee() { fn test_serve_same_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 2; let num_slots = 2;
let (shreds, _) = make_many_slot_entries(0, num_slots, 1); let (shreds, _) = make_many_slot_entries(0, num_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman // Write roots so that these slots will qualify to be sent by the repairman
let last_root = num_slots - 1; let last_root = num_slots - 1;
let roots: Vec<_> = (0..=last_root).collect(); let roots: Vec<_> = (0..=last_root).collect();
blocktree.set_roots(&roots).unwrap(); blockstore.set_roots(&roots).unwrap();
// Set up my information // Set up my information
let my_pubkey = Pubkey::new_rand(); let my_pubkey = Pubkey::new_rand();
@ -729,7 +729,7 @@ mod tests {
&my_pubkey, &my_pubkey,
&mock_repairee.id, &mock_repairee.id,
num_slots - 1, num_slots - 1,
&blocktree, &blockstore,
&repairee_epoch_slots, &repairee_epoch_slots,
&eligible_repairmen, &eligible_repairmen,
&my_socket, &my_socket,
@ -749,7 +749,7 @@ mod tests {
&my_pubkey, &my_pubkey,
&mock_repairee.id, &mock_repairee.id,
num_slots - 1, num_slots - 1,
&blocktree, &blockstore,
&repairee_epoch_slots, &repairee_epoch_slots,
&eligible_repairmen, &eligible_repairmen,
&my_socket, &my_socket,
@ -765,20 +765,20 @@ mod tests {
#[test] #[test]
fn test_serve_repairs_to_repairee() { fn test_serve_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let entries_per_slot = 5; let entries_per_slot = 5;
let num_slots = 10; let num_slots = 10;
assert_eq!(num_slots % 2, 0); assert_eq!(num_slots % 2, 0);
let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot); let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
let num_shreds_per_slot = shreds.len() as u64 / num_slots; let num_shreds_per_slot = shreds.len() as u64 / num_slots;
// Write slots in the range [0, num_slots] to blocktree // Write slots in the range [0, num_slots] to blockstore
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman // Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=num_slots - 1).collect(); let roots: Vec<_> = (0..=num_slots - 1).collect();
blocktree.set_roots(&roots).unwrap(); blockstore.set_roots(&roots).unwrap();
// Set up my information // Set up my information
let my_pubkey = Pubkey::new_rand(); let my_pubkey = Pubkey::new_rand();
@ -809,7 +809,7 @@ mod tests {
&repairman_pubkey, &repairman_pubkey,
&mock_repairee.id, &mock_repairee.id,
num_slots - 1, num_slots - 1,
&blocktree, &blockstore,
&repairee_epoch_slots, &repairee_epoch_slots,
&eligible_repairmen_refs, &eligible_repairmen_refs,
&my_socket, &my_socket,
@ -848,26 +848,26 @@ mod tests {
// Shutdown // Shutdown
mock_repairee.close().unwrap(); mock_repairee.close().unwrap();
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
fn test_no_repair_past_confirmed_epoch() { fn test_no_repair_past_confirmed_epoch() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let stakers_slot_offset = 16; let stakers_slot_offset = 16;
let slots_per_epoch = stakers_slot_offset * 2; let slots_per_epoch = stakers_slot_offset * 2;
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false); let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false);
// Create shreds for first two epochs and write them to blocktree // Create shreds for first two epochs and write them to blockstore
let total_slots = slots_per_epoch * 2; let total_slots = slots_per_epoch * 2;
let (shreds, _) = make_many_slot_entries(0, total_slots, 1); let (shreds, _) = make_many_slot_entries(0, total_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman // Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect(); let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
blocktree.set_roots(&roots).unwrap(); blockstore.set_roots(&roots).unwrap();
// Set up my information // Set up my information
let my_pubkey = Pubkey::new_rand(); let my_pubkey = Pubkey::new_rand();
@ -896,7 +896,7 @@ mod tests {
&my_pubkey, &my_pubkey,
&mock_repairee.id, &mock_repairee.id,
total_slots - 1, total_slots - 1,
&blocktree, &blockstore,
&repairee_epoch_slots, &repairee_epoch_slots,
&vec![&my_pubkey], &vec![&my_pubkey],
&my_socket, &my_socket,
@ -919,7 +919,7 @@ mod tests {
&my_pubkey, &my_pubkey,
&mock_repairee.id, &mock_repairee.id,
total_slots - 1, total_slots - 1,
&blocktree, &blockstore,
&repairee_epoch_slots, &repairee_epoch_slots,
&vec![&my_pubkey], &vec![&my_pubkey],
&my_socket, &my_socket,
@ -936,8 +936,8 @@ mod tests {
// Shutdown // Shutdown
mock_repairee.close().unwrap(); mock_repairee.close().unwrap();
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]


@ -6,7 +6,7 @@ use crate::streamer;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient}; use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::bank_forks::BankForks; use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler; use solana_perf::recycler::Recycler;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
@ -24,7 +24,7 @@ pub struct GossipService {
impl GossipService { impl GossipService {
pub fn new( pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: Option<Arc<Blocktree>>, blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>, bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket, gossip_socket: UdpSocket,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
@ -47,7 +47,7 @@ impl GossipService {
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver); let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_listen = ClusterInfo::listen( let t_listen = ClusterInfo::listen(
cluster_info.clone(), cluster_info.clone(),
blocktree, blockstore,
bank_forks.clone(), bank_forks.clone(),
request_receiver, request_receiver,
response_sender.clone(), response_sender.clone(),
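// A minimal construction sketch, assuming the caller already holds the wired
// values; passing `None` for the blockstore disables serving repair requests,
// since the repair handlers above return nothing without it:
let _gossip_service = GossipService::new(
    &cluster_info,              // Arc<RwLock<ClusterInfo>>
    Some(blockstore.clone()),   // enables answering window/orphan repairs
    Some(bank_forks.clone()),   // used to weight peers by stake
    gossip_socket,
    &exit,
);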


@ -1,6 +1,6 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage //! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_metrics::datapoint_debug; use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot; use solana_sdk::clock::Slot;
use std::string::ToString; use std::string::ToString;
@ -27,7 +27,7 @@ pub struct LedgerCleanupService {
impl LedgerCleanupService { impl LedgerCleanupService {
pub fn new( pub fn new(
new_root_receiver: Receiver<Slot>, new_root_receiver: Receiver<Slot>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
max_ledger_slots: u64, max_ledger_slots: u64,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
) -> Self { ) -> Self {
@ -45,7 +45,7 @@ impl LedgerCleanupService {
} }
if let Err(e) = Self::cleanup_ledger( if let Err(e) = Self::cleanup_ledger(
&new_root_receiver, &new_root_receiver,
&blocktree, &blockstore,
max_ledger_slots, max_ledger_slots,
&mut next_purge_batch, &mut next_purge_batch,
) { ) {
@ -61,20 +61,20 @@ impl LedgerCleanupService {
fn cleanup_ledger( fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>, new_root_receiver: &Receiver<Slot>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
max_ledger_slots: u64, max_ledger_slots: u64,
next_purge_batch: &mut u64, next_purge_batch: &mut u64,
) -> Result<(), RecvTimeoutError> { ) -> Result<(), RecvTimeoutError> {
let disk_utilization_pre = blocktree.storage_size(); let disk_utilization_pre = blockstore.storage_size();
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?; let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
if root > *next_purge_batch { if root > *next_purge_batch {
//cleanup //cleanup
blocktree.purge_slots(0, Some(root - max_ledger_slots)); blockstore.purge_slots(0, Some(root - max_ledger_slots));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE; *next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
} }
let disk_utilization_post = blocktree.storage_size(); let disk_utilization_post = blockstore.storage_size();
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) = if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
(disk_utilization_pre, disk_utilization_post) (disk_utilization_pre, disk_utilization_post)
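// Worked example of the purge window above: with `max_ledger_slots = 10` and
// a freshly received root of 50, the service purges slots 0..=40 and keeps
// the most recent slots, matching the `test_cleanup` case below:
// blockstore.purge_slots(0, Some(50 - 10));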
@ -101,39 +101,39 @@ impl LedgerCleanupService {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use solana_ledger::blocktree::make_many_slot_entries; use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
#[test] #[test]
fn test_cleanup() { fn test_cleanup() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5); let (shreds, _) = make_many_slot_entries(0, 50, 5);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel(); let (sender, receiver) = channel();
//send a signal to kill slots 0-40 //send a signal to kill slots 0-40
let mut next_purge_slot = 0; let mut next_purge_slot = 0;
sender.send(50).unwrap(); sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blocktree, 10, &mut next_purge_slot) LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
.unwrap(); .unwrap();
//check that 0-40 don't exist //check that 0-40 don't exist
blocktree blockstore
.slot_meta_iterator(0) .slot_meta_iterator(0)
.unwrap() .unwrap()
.for_each(|(slot, _)| assert!(slot > 40)); .for_each(|(slot, _)| assert!(slot > 40));
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
fn test_compaction() { fn test_compaction() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let n = 10_000; let n = 10_000;
let batch_size = 100; let batch_size = 100;
@ -142,10 +142,10 @@ mod tests {
for i in 0..batches { for i in 0..batches {
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1); let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
} }
let u1 = blocktree.storage_size().unwrap() as f64; let u1 = blockstore.storage_size().unwrap() as f64;
// send signal to cleanup slots // send signal to cleanup slots
let (sender, receiver) = channel(); let (sender, receiver) = channel();
@ -153,7 +153,7 @@ mod tests {
let mut next_purge_batch = 0; let mut next_purge_batch = 0;
LedgerCleanupService::cleanup_ledger( LedgerCleanupService::cleanup_ledger(
&receiver, &receiver,
&blocktree, &blockstore,
max_ledger_slots, max_ledger_slots,
&mut next_purge_batch, &mut next_purge_batch,
) )
@ -161,18 +161,18 @@ mod tests {
thread::sleep(Duration::from_secs(2)); thread::sleep(Duration::from_secs(2));
let u2 = blocktree.storage_size().unwrap() as f64; let u2 = blockstore.storage_size().unwrap() as f64;
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,); assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
// check that early slots don't exist // check that early slots don't exist
let max_slot = n - max_ledger_slots; let max_slot = n - max_ledger_slots;
blocktree blockstore
.slot_meta_iterator(0) .slot_meta_iterator(0)
.unwrap() .unwrap()
.for_each(|(slot, _)| assert!(slot > max_slot)); .for_each(|(slot, _)| assert!(slot > max_slot));
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
} }


@ -10,7 +10,7 @@
//! For Entries: //! For Entries:
//! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height
//! //!
use solana_ledger::blocktree::Blocktree; use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry; use solana_ledger::entry::Entry;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::poh::Poh; use solana_ledger::poh::Poh;
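// A sketch of the recording window stated above; a record is only accepted
// while the working bank's tick range is open:
fn record_in_range(tick_height: u64, min_tick_height: u64, max_tick_height: u64) -> bool {
    tick_height >= min_tick_height && tick_height < max_tick_height
}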
@ -70,7 +70,7 @@ pub struct PohRecorder {
leader_last_tick_height: u64, // zero if none leader_last_tick_height: u64, // zero if none
grace_ticks: u64, grace_ticks: u64,
id: Pubkey, id: Pubkey,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
leader_schedule_cache: Arc<LeaderScheduleCache>, leader_schedule_cache: Arc<LeaderScheduleCache>,
poh_config: Arc<PohConfig>, poh_config: Arc<PohConfig>,
ticks_per_slot: u64, ticks_per_slot: u64,
@ -84,7 +84,7 @@ impl PohRecorder {
&self.id, &self.id,
bank.slot(), bank.slot(),
&bank, &bank,
Some(&self.blocktree), Some(&self.blockstore),
); );
assert_eq!(self.ticks_per_slot, bank.ticks_per_slot()); assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
let (leader_first_tick_height, leader_last_tick_height, grace_ticks) = let (leader_first_tick_height, leader_last_tick_height, grace_ticks) =
@ -407,7 +407,7 @@ impl PohRecorder {
next_leader_slot: Option<(Slot, Slot)>, next_leader_slot: Option<(Slot, Slot)>,
ticks_per_slot: u64, ticks_per_slot: u64,
id: &Pubkey, id: &Pubkey,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
clear_bank_signal: Option<SyncSender<bool>>, clear_bank_signal: Option<SyncSender<bool>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>, poh_config: &Arc<PohConfig>,
@ -433,7 +433,7 @@ impl PohRecorder {
leader_last_tick_height, leader_last_tick_height,
grace_ticks, grace_ticks,
id: *id, id: *id,
blocktree: blocktree.clone(), blockstore: blockstore.clone(),
leader_schedule_cache: leader_schedule_cache.clone(), leader_schedule_cache: leader_schedule_cache.clone(),
ticks_per_slot, ticks_per_slot,
poh_config: poh_config.clone(), poh_config: poh_config.clone(),
@ -452,7 +452,7 @@ impl PohRecorder {
next_leader_slot: Option<(Slot, Slot)>, next_leader_slot: Option<(Slot, Slot)>,
ticks_per_slot: u64, ticks_per_slot: u64,
id: &Pubkey, id: &Pubkey,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>, poh_config: &Arc<PohConfig>,
) -> (Self, Receiver<WorkingBankEntry>) { ) -> (Self, Receiver<WorkingBankEntry>) {
@ -463,7 +463,7 @@ impl PohRecorder {
next_leader_slot, next_leader_slot,
ticks_per_slot, ticks_per_slot,
id, id,
blocktree, blockstore,
None, None,
leader_schedule_cache, leader_schedule_cache,
poh_config, poh_config,
@ -475,7 +475,7 @@ impl PohRecorder {
mod tests { mod tests {
use super::*; use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx; use solana_perf::test_tx::test_tx;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::hash; use solana_sdk::hash::hash;
@ -486,8 +486,8 @@ mod tests {
let prev_hash = Hash::default(); let prev_hash = Hash::default();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0, 0,
@ -496,7 +496,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
DEFAULT_TICKS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -505,7 +505,7 @@ mod tests {
assert_eq!(poh_recorder.tick_cache[0].1, 1); assert_eq!(poh_recorder.tick_cache[0].1, 1);
assert_eq!(poh_recorder.tick_height, 1); assert_eq!(poh_recorder.tick_height, 1);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -513,8 +513,8 @@ mod tests {
let prev_hash = Hash::default(); let prev_hash = Hash::default();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0, 0,
@ -523,7 +523,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
DEFAULT_TICKS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -533,15 +533,15 @@ mod tests {
assert_eq!(poh_recorder.tick_cache[1].1, 2); assert_eq!(poh_recorder.tick_cache[1].1, 2);
assert_eq!(poh_recorder.tick_height, 2); assert_eq!(poh_recorder.tick_height, 2);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_reset_clears_cache() { fn test_poh_recorder_reset_clears_cache() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0, 0,
Hash::default(), Hash::default(),
@ -549,7 +549,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
DEFAULT_TICKS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -558,15 +558,15 @@ mod tests {
poh_recorder.reset(Hash::default(), 0, Some((4, 4))); poh_recorder.reset(Hash::default(), 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0); assert_eq!(poh_recorder.tick_cache.len(), 0);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_clear() { fn test_poh_recorder_clear() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -577,7 +577,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -592,15 +592,15 @@ mod tests {
poh_recorder.clear_bank(); poh_recorder.clear_bank();
assert!(poh_recorder.working_bank.is_none()); assert!(poh_recorder.working_bank.is_none());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_tick_sent_after_min() { fn test_poh_recorder_tick_sent_after_min() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -611,7 +611,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -641,15 +641,15 @@ mod tests {
assert_eq!(num_entries, 3); assert_eq!(num_entries, 3);
assert!(poh_recorder.working_bank.is_none()); assert!(poh_recorder.working_bank.is_none());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_tick_sent_upto_and_including_max() { fn test_poh_recorder_tick_sent_upto_and_including_max() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -660,7 +660,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -688,15 +688,15 @@ mod tests {
} }
assert_eq!(num_entries, 3); assert_eq!(num_entries, 3);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_record_to_early() { fn test_poh_recorder_record_to_early() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -707,7 +707,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -726,15 +726,15 @@ mod tests {
.is_err()); .is_err());
assert!(entry_receiver.try_recv().is_err()); assert!(entry_receiver.try_recv().is_err());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_record_bad_slot() { fn test_poh_recorder_record_bad_slot() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -745,7 +745,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -766,15 +766,15 @@ mod tests {
Err(PohRecorderError::MaxHeightReached) Err(PohRecorderError::MaxHeightReached)
); );
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_record_at_min_passes() { fn test_poh_recorder_record_at_min_passes() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -785,7 +785,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -812,15 +812,15 @@ mod tests {
let (_bank, (e, _tick_height)) = entry_receiver.recv().expect("recv 2"); let (_bank, (e, _tick_height)) = entry_receiver.recv().expect("recv 2");
assert!(!e.is_tick()); assert!(!e.is_tick());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_recorder_record_at_max_fails() { fn test_poh_recorder_record_at_max_fails() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -831,7 +831,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -856,15 +856,15 @@ mod tests {
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert!(entry.is_tick()); assert!(entry.is_tick());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_poh_cache_on_disconnect() { fn test_poh_cache_on_disconnect() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -875,7 +875,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -894,15 +894,15 @@ mod tests {
assert!(poh_recorder.working_bank.is_none()); assert!(poh_recorder.working_bank.is_none());
assert_eq!(poh_recorder.tick_cache.len(), 3); assert_eq!(poh_recorder.tick_cache.len(), 3);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_reset_current() { fn test_reset_current() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0, 0,
Hash::default(), Hash::default(),
@ -910,7 +910,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
DEFAULT_TICKS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -921,15 +921,15 @@ mod tests {
poh_recorder.reset(hash, 0, Some((4, 4))); poh_recorder.reset(hash, 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0); assert_eq!(poh_recorder.tick_cache.len(), 0);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_reset_with_cached() { fn test_reset_with_cached() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0, 0,
Hash::default(), Hash::default(),
@ -937,7 +937,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
DEFAULT_TICKS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -947,7 +947,7 @@ mod tests {
poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4))); poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0); assert_eq!(poh_recorder.tick_cache.len(), 0);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -956,8 +956,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0, 0,
Hash::default(), Hash::default(),
@ -965,7 +965,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
DEFAULT_TICKS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -980,15 +980,15 @@ mod tests {
poh_recorder.tick(); poh_recorder.tick();
assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1); assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_reset_clear_bank() { fn test_reset_clear_bank() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new( let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
@ -998,7 +998,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1011,15 +1011,15 @@ mod tests {
poh_recorder.reset(hash(b"hello"), 0, Some((4, 4))); poh_recorder.reset(hash(b"hello"), 0, Some((4, 4)));
assert!(poh_recorder.working_bank.is_none()); assert!(poh_recorder.working_bank.is_none());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
pub fn test_clear_signal() { pub fn test_clear_signal() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let (sender, receiver) = sync_channel(1); let (sender, receiver) = sync_channel(1);
@ -1030,7 +1030,7 @@ mod tests {
None, None,
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
Some(sender), Some(sender),
&Arc::new(LeaderScheduleCache::default()), &Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
@ -1039,7 +1039,7 @@ mod tests {
poh_recorder.clear_bank(); poh_recorder.clear_bank();
assert!(receiver.try_recv().is_ok()); assert!(receiver.try_recv().is_ok());
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1047,8 +1047,8 @@ mod tests {
solana_logger::setup(); solana_logger::setup();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let ticks_per_slot = 5; let ticks_per_slot = 5;
let GenesisConfigInfo { let GenesisConfigInfo {
mut genesis_config, .. mut genesis_config, ..
@ -1064,7 +1064,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1091,7 +1091,7 @@ mod tests {
// Make sure the starting slot is updated // Make sure the starting slot is updated
assert_eq!(poh_recorder.start_slot, end_slot); assert_eq!(poh_recorder.start_slot, end_slot);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
@ -1100,8 +1100,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -1112,7 +1112,7 @@ mod tests {
None, None,
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1213,15 +1213,15 @@ mod tests {
assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot()); assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot());
assert_eq!(leader_slot, 9); assert_eq!(leader_slot, 9);
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
#[test] #[test]
fn test_would_be_leader_soon() { fn test_would_be_leader_soon() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
@ -1232,7 +1232,7 @@ mod tests {
None, None,
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );
@ -1287,8 +1287,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
// test that virtual ticks are flushed into a newly set bank asap // test that virtual ticks are flushed into a newly set bank asap
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let genesis_hash = bank.last_blockhash(); let genesis_hash = bank.last_blockhash();
@ -1300,7 +1300,7 @@ mod tests {
Some((2, 2)), Some((2, 2)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
); );

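// A minimal sketch of the test scaffold repeated above, assuming only the
// solana_ledger API visible in this diff (the helper name is hypothetical):
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};

fn _scoped_ledger_sketch() {
    let ledger_path = get_tmp_ledger_path!();
    {
        // Open inside an inner scope so the database handle is dropped
        // before the directory is destroyed below.
        let blockstore = Blockstore::open(&ledger_path)
            .expect("Expected to be able to open database ledger");
        let _ = &blockstore; // ... exercise PohRecorder against `blockstore` ...
    }
    Blockstore::destroy(&ledger_path).unwrap();
}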

@ -123,7 +123,7 @@ mod tests {
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::poh_recorder::WorkingBank; use crate::poh_recorder::WorkingBank;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx; use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::hash::hash; use solana_sdk::hash::hash;
@ -137,8 +137,8 @@ mod tests {
let prev_hash = bank.last_blockhash(); let prev_hash = bank.last_blockhash();
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = let blockstore = Blockstore::open(&ledger_path)
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let poh_config = Arc::new(PohConfig { let poh_config = Arc::new(PohConfig {
hashes_per_tick: Some(2), hashes_per_tick: Some(2),
target_tick_duration: Duration::from_millis(42), target_tick_duration: Duration::from_millis(42),
@ -151,7 +151,7 @@ mod tests {
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::default(), &Pubkey::default(),
&Arc::new(blocktree), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&poh_config, &poh_config,
); );
@ -230,6 +230,6 @@ mod tests {
let _ = poh_service.join().unwrap(); let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap(); let _ = entry_producer.join().unwrap();
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
} }
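// A hedged sketch of the PohConfig built in the test above; the two field
// values are taken from this diff, while the import path and the Default
// fill-in for the remaining fields are assumptions:
use solana_sdk::poh_config::PohConfig;
use std::{sync::Arc, time::Duration};

fn _poh_config_sketch() -> Arc<PohConfig> {
    Arc::new(PohConfig {
        hashes_per_tick: Some(2), // cap on hashes between ticks
        target_tick_duration: Duration::from_millis(42), // paces the tick thread
        ..PohConfig::default()
    })
}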


@ -6,7 +6,7 @@ use crate::{
}; };
use solana_ledger::{ use solana_ledger::{
bank_forks::BankForks, bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta}, blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
}; };
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey}; use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{ use std::{
@ -71,7 +71,7 @@ pub struct RepairService {
impl RepairService { impl RepairService {
pub fn new( pub fn new(
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>, repair_socket: Arc<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
@ -81,7 +81,7 @@ impl RepairService {
RepairStrategy::RepairAll { RepairStrategy::RepairAll {
ref epoch_schedule, .. ref epoch_schedule, ..
} => Some(ClusterInfoRepairListener::new( } => Some(ClusterInfoRepairListener::new(
&blocktree, &blockstore,
&exit, &exit,
cluster_info.clone(), cluster_info.clone(),
*epoch_schedule, *epoch_schedule,
@ -94,7 +94,7 @@ impl RepairService {
.name("solana-repair-service".to_string()) .name("solana-repair-service".to_string())
.spawn(move || { .spawn(move || {
Self::run( Self::run(
&blocktree, &blockstore,
&exit, &exit,
&repair_socket, &repair_socket,
&cluster_info, &cluster_info,
@ -110,7 +110,7 @@ impl RepairService {
} }
fn run( fn run(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
repair_socket: &Arc<UdpSocket>, repair_socket: &Arc<UdpSocket>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -123,10 +123,10 @@ impl RepairService {
ref epoch_schedule, .. ref epoch_schedule, ..
} = repair_strategy } = repair_strategy
{ {
current_root = blocktree.last_root(); current_root = blockstore.last_root();
Self::initialize_epoch_slots( Self::initialize_epoch_slots(
id, id,
blocktree, blockstore,
&mut epoch_slots, &mut epoch_slots,
current_root, current_root,
epoch_schedule, epoch_schedule,
@ -143,7 +143,7 @@ impl RepairService {
RepairStrategy::RepairRange(ref repair_slot_range) => { RepairStrategy::RepairRange(ref repair_slot_range) => {
// Strategy used by archivers // Strategy used by archivers
Self::generate_repairs_in_range( Self::generate_repairs_in_range(
blocktree, blockstore,
MAX_REPAIR_LENGTH, MAX_REPAIR_LENGTH,
repair_slot_range, repair_slot_range,
) )
@ -153,8 +153,8 @@ impl RepairService {
ref completed_slots_receiver, ref completed_slots_receiver,
.. ..
} => { } => {
let new_root = blocktree.last_root(); let new_root = blockstore.last_root();
let lowest_slot = blocktree.lowest_slot(); let lowest_slot = blockstore.lowest_slot();
Self::update_epoch_slots( Self::update_epoch_slots(
id, id,
new_root, new_root,
@ -164,7 +164,7 @@ impl RepairService {
&cluster_info, &cluster_info,
completed_slots_receiver, completed_slots_receiver,
); );
Self::generate_repairs(blocktree, new_root, MAX_REPAIR_LENGTH) Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
} }
} }
}; };
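// Read off the match arms above (a reconstruction, not the full definition):
// RepairStrategy carries at least
//   RepairRange(RepairSlotRange)  -- archivers repair a fixed window of slots
//   RepairAll { epoch_schedule, completed_slots_receiver, .. }
// and only RepairAll drives the epoch-slots gossip bookkeeping seen in run().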
@ -195,7 +195,7 @@ impl RepairService {
// Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end // Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end
pub fn generate_repairs_in_range( pub fn generate_repairs_in_range(
blocktree: &Blocktree, blockstore: &Blockstore,
max_repairs: usize, max_repairs: usize,
repair_range: &RepairSlotRange, repair_range: &RepairSlotRange,
) -> Result<Vec<RepairType>> { ) -> Result<Vec<RepairType>> {
@ -206,7 +206,7 @@ impl RepairService {
break; break;
} }
let meta = blocktree let meta = blockstore
.meta(slot) .meta(slot)
.expect("Unable to lookup slot meta") .expect("Unable to lookup slot meta")
.unwrap_or(SlotMeta { .unwrap_or(SlotMeta {
@ -215,7 +215,7 @@ impl RepairService {
}); });
let new_repairs = Self::generate_repairs_for_slot( let new_repairs = Self::generate_repairs_for_slot(
blocktree, blockstore,
slot, slot,
&meta, &meta,
max_repairs - repairs.len(), max_repairs - repairs.len(),
@ -227,18 +227,18 @@ impl RepairService {
} }
fn generate_repairs( fn generate_repairs(
blocktree: &Blocktree, blockstore: &Blockstore,
root: Slot, root: Slot,
max_repairs: usize, max_repairs: usize,
) -> Result<Vec<RepairType>> { ) -> Result<Vec<RepairType>> {
// Slot height and shred indexes for shreds we want to repair // Slot height and shred indexes for shreds we want to repair
let mut repairs: Vec<RepairType> = vec![]; let mut repairs: Vec<RepairType> = vec![];
Self::generate_repairs_for_fork(blocktree, &mut repairs, max_repairs, root); Self::generate_repairs_for_fork(blockstore, &mut repairs, max_repairs, root);
// TODO: Incorporate gossip to determine priorities for repair? // TODO: Incorporate gossip to determine priorities for repair?
// Try to resolve orphans in blocktree // Try to resolve orphans in blockstore
let mut orphans = blocktree.get_orphans(Some(MAX_ORPHANS)); let mut orphans = blockstore.get_orphans(Some(MAX_ORPHANS));
orphans.retain(|x| *x > root); orphans.retain(|x| *x > root);
Self::generate_repairs_for_orphans(&orphans[..], &mut repairs); Self::generate_repairs_for_orphans(&orphans[..], &mut repairs);
@ -246,7 +246,7 @@ impl RepairService {
} }
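// In brief, repair selection works as follows (types read off this diff):
// a slot whose parent is missing is requested as RepairType::Orphan(slot);
// a slot with consumed == received has no interior holes, so only
// RepairType::HighestShred(slot, received) is requested; otherwise
// find_missing_data_indexes() enumerates the specific gaps to ask peers for.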
fn generate_repairs_for_slot( fn generate_repairs_for_slot(
blocktree: &Blocktree, blockstore: &Blockstore,
slot: Slot, slot: Slot,
slot_meta: &SlotMeta, slot_meta: &SlotMeta,
max_repairs: usize, max_repairs: usize,
@ -256,7 +256,7 @@ impl RepairService {
} else if slot_meta.consumed == slot_meta.received { } else if slot_meta.consumed == slot_meta.received {
vec![RepairType::HighestShred(slot, slot_meta.received)] vec![RepairType::HighestShred(slot, slot_meta.received)]
} else { } else {
let reqs = blocktree.find_missing_data_indexes( let reqs = blockstore.find_missing_data_indexes(
slot, slot,
slot_meta.first_shred_timestamp, slot_meta.first_shred_timestamp,
slot_meta.consumed, slot_meta.consumed,
@ -275,7 +275,7 @@ impl RepairService {
/// Repairs any fork starting at the input slot /// Repairs any fork starting at the input slot
fn generate_repairs_for_fork( fn generate_repairs_for_fork(
blocktree: &Blocktree, blockstore: &Blockstore,
repairs: &mut Vec<RepairType>, repairs: &mut Vec<RepairType>,
max_repairs: usize, max_repairs: usize,
slot: Slot, slot: Slot,
@ -283,9 +283,9 @@ impl RepairService {
let mut pending_slots = vec![slot]; let mut pending_slots = vec![slot];
while repairs.len() < max_repairs && !pending_slots.is_empty() { while repairs.len() < max_repairs && !pending_slots.is_empty() {
let slot = pending_slots.pop().unwrap(); let slot = pending_slots.pop().unwrap();
if let Some(slot_meta) = blocktree.meta(slot).unwrap() { if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
let new_repairs = Self::generate_repairs_for_slot( let new_repairs = Self::generate_repairs_for_slot(
blocktree, blockstore,
slot, slot,
&slot_meta, &slot_meta,
max_repairs - repairs.len(), max_repairs - repairs.len(),
@ -300,7 +300,7 @@ impl RepairService {
} }
fn get_completed_slots_past_root( fn get_completed_slots_past_root(
blocktree: &Blocktree, blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>, slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot, root: Slot,
epoch_schedule: &EpochSchedule, epoch_schedule: &EpochSchedule,
@ -308,7 +308,7 @@ impl RepairService {
let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root); let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root);
let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch); let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch);
let meta_iter = blocktree let meta_iter = blockstore
.slot_meta_iterator(root + 1) .slot_meta_iterator(root + 1)
.expect("Couldn't get db iterator"); .expect("Couldn't get db iterator");
@ -324,22 +324,22 @@ impl RepairService {
fn initialize_epoch_slots( fn initialize_epoch_slots(
id: Pubkey, id: Pubkey,
blocktree: &Blocktree, blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>, slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot, root: Slot,
epoch_schedule: &EpochSchedule, epoch_schedule: &EpochSchedule,
cluster_info: &RwLock<ClusterInfo>, cluster_info: &RwLock<ClusterInfo>,
) { ) {
Self::get_completed_slots_past_root(blocktree, slots_in_gossip, root, epoch_schedule); Self::get_completed_slots_past_root(blockstore, slots_in_gossip, root, epoch_schedule);
// Safe to set into gossip because by this time, the leader schedule cache should // Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blocktree_processor) and thus // also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the // will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch. // last_confirmed_epoch.
cluster_info.write().unwrap().push_epoch_slots( cluster_info.write().unwrap().push_epoch_slots(
id, id,
root, root,
blocktree.lowest_slot(), blockstore.lowest_slot(),
slots_in_gossip.clone(), slots_in_gossip.clone(),
); );
} }
@ -409,60 +409,60 @@ mod test {
use itertools::Itertools; use itertools::Itertools;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use solana_ledger::blocktree::{ use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries, make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
}; };
use solana_ledger::shred::max_ticks_per_n_shreds; use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path}; use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
use std::thread::Builder; use std::thread::Builder;
#[test] #[test]
pub fn test_repair_orphan() { pub fn test_repair_orphan() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create some orphan slots // Create some orphan slots
let (mut shreds, _) = make_slot_entries(1, 0, 1); let (mut shreds, _) = make_slot_entries(1, 0, 1);
let (shreds2, _) = make_slot_entries(5, 2, 1); let (shreds2, _) = make_slot_entries(5, 2, 1);
shreds.extend(shreds2); shreds.extend(shreds2);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!( assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(), RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)] vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
); );
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
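// The orphan scenario above, spelled out (assuming the signature
// make_slot_entries(slot, parent, num_entries) implied by the diff):
// shreds land for slot 1 (parent 0) and slot 5 (parent 2). Slot 0 is known
// but has no shreds, so the fork walk requests HighestShred(0, 0); slot 5
// chains to the wholly absent slot 2, which surfaces as Orphan(2).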
#[test] #[test]
pub fn test_repair_empty_slot() { pub fn test_repair_empty_slot() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_slot_entries(2, 0, 1); let (shreds, _) = make_slot_entries(2, 0, 1);
// Write this shred to slot 2, should chain to slot 0, which we haven't received // Write this shred to slot 2, should chain to slot 0, which we haven't received
// any shreds for // any shreds for
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
// Check that repair tries to patch the empty slot // Check that repair tries to patch the empty slot
assert_eq!( assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(), RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0)] vec![RepairType::HighestShred(0, 0)]
); );
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
pub fn test_generate_repairs() { pub fn test_generate_repairs() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let nth = 3; let nth = 3;
let num_slots = 2; let num_slots = 2;
@ -483,7 +483,7 @@ mod test {
missing_indexes_per_slot.insert(0, index); missing_indexes_per_slot.insert(0, index);
} }
} }
blocktree blockstore
.insert_shreds(shreds_to_write, None, false) .insert_shreds(shreds_to_write, None, false)
.unwrap(); .unwrap();
// sleep so that the holes are ready for repair // sleep so that the holes are ready for repair
@ -497,23 +497,23 @@ mod test {
.collect(); .collect();
assert_eq!( assert_eq!(
RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(), RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected expected
); );
assert_eq!( assert_eq!(
RepairService::generate_repairs(&blocktree, 0, expected.len() - 2).unwrap()[..], RepairService::generate_repairs(&blockstore, 0, expected.len() - 2).unwrap()[..],
expected[0..expected.len() - 2] expected[0..expected.len() - 2]
); );
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
pub fn test_generate_highest_repair() { pub fn test_generate_highest_repair() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 100; let num_entries_per_slot = 100;
@ -524,25 +524,25 @@ mod test {
// Remove last shred (which is also last in slot) so that slot is not complete // Remove last shred (which is also last in slot) so that slot is not complete
shreds.pop(); shreds.pop();
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
// We didn't get the last shred for this slot, so ask for the highest shred for that slot // We didn't get the last shred for this slot, so ask for the highest shred for that slot
let expected: Vec<RepairType> = let expected: Vec<RepairType> =
vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)]; vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];
assert_eq!( assert_eq!(
RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(), RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected expected
); );
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
pub fn test_repair_range() { pub fn test_repair_range() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slots: Vec<u64> = vec![1, 3, 5, 7, 8]; let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1; let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
@ -550,7 +550,7 @@ mod test {
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot); let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() { for (mut slot_shreds, _) in shreds.into_iter() {
slot_shreds.remove(0); slot_shreds.remove(0);
blocktree.insert_shreds(slot_shreds, None, false).unwrap(); blockstore.insert_shreds(slot_shreds, None, false).unwrap();
} }
// sleep to make slot eligible for repair // sleep to make slot eligible for repair
sleep(Duration::from_secs(1)); sleep(Duration::from_secs(1));
@ -574,7 +574,7 @@ mod test {
assert_eq!( assert_eq!(
RepairService::generate_repairs_in_range( RepairService::generate_repairs_in_range(
&blocktree, &blockstore,
std::usize::MAX, std::usize::MAX,
&repair_slot_range &repair_slot_range
) )
@ -584,14 +584,14 @@ mod test {
} }
} }
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
pub fn test_repair_range_highest() { pub fn test_repair_range_highest() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10; let num_entries_per_slot = 10;
@ -603,7 +603,7 @@ mod test {
let parent = if i > 0 { i - 1 } else { 0 }; let parent = if i > 0 { i - 1 } else { 0 };
let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64); let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
} }
let end = 4; let end = 4;
@ -619,7 +619,7 @@ mod test {
assert_eq!( assert_eq!(
RepairService::generate_repairs_in_range( RepairService::generate_repairs_in_range(
&blocktree, &blockstore,
std::usize::MAX, std::usize::MAX,
&repair_slot_range &repair_slot_range
) )
@ -627,14 +627,14 @@ mod test {
expected expected
); );
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
pub fn test_get_completed_slots_past_root() { pub fn test_get_completed_slots_past_root() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10; let num_entries_per_slot = 10;
let root = 10; let root = 10;
@ -656,8 +656,8 @@ mod test {
.collect(); .collect();
let mut full_slots = BTreeSet::new(); let mut full_slots = BTreeSet::new();
blocktree.insert_shreds(fork1_shreds, None, false).unwrap(); blockstore.insert_shreds(fork1_shreds, None, false).unwrap();
blocktree blockstore
.insert_shreds(fork2_incomplete_shreds, None, false) .insert_shreds(fork2_incomplete_shreds, None, false)
.unwrap(); .unwrap();
@ -665,7 +665,7 @@ mod test {
let epoch_schedule = EpochSchedule::custom(32, 32, false); let epoch_schedule = EpochSchedule::custom(32, 32, false);
RepairService::get_completed_slots_past_root( RepairService::get_completed_slots_past_root(
&blocktree, &blockstore,
&mut full_slots, &mut full_slots,
root, root,
&epoch_schedule, &epoch_schedule,
@ -682,9 +682,9 @@ mod test {
.into_iter() .into_iter()
.flat_map(|(shreds, _)| shreds) .flat_map(|(shreds, _)| shreds)
.collect(); .collect();
blocktree.insert_shreds(fork3_shreds, None, false).unwrap(); blockstore.insert_shreds(fork3_shreds, None, false).unwrap();
RepairService::get_completed_slots_past_root( RepairService::get_completed_slots_past_root(
&blocktree, &blockstore,
&mut full_slots, &mut full_slots,
root, root,
&epoch_schedule, &epoch_schedule,
@ -692,25 +692,25 @@ mod test {
expected.insert(last_slot); expected.insert(last_slot);
assert_eq!(full_slots, expected); assert_eq!(full_slots, expected);
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
pub fn test_update_epoch_slots() { pub fn test_update_epoch_slots() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
// Create blocktree // Create blockstore
let (blocktree, _, completed_slots_receiver) = let (blockstore, _, completed_slots_receiver) =
Blocktree::open_with_signal(&blocktree_path).unwrap(); Blockstore::open_with_signal(&blockstore_path).unwrap();
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let mut root = 0; let mut root = 0;
let num_slots = 100; let num_slots = 100;
let entries_per_slot = 5; let entries_per_slot = 5;
let blocktree_ = blocktree.clone(); let blockstore_ = blockstore.clone();
// Spin up thread to write to blocktree // Spin up thread to write to blockstore
let writer = Builder::new() let writer = Builder::new()
.name("writer".to_string()) .name("writer".to_string())
.spawn(move || { .spawn(move || {
@ -729,7 +729,7 @@ mod test {
let step = rng.gen_range(1, max_step + 1) as usize; let step = rng.gen_range(1, max_step + 1) as usize;
let step = std::cmp::min(step, num_shreds - i); let step = std::cmp::min(step, num_shreds - i);
let shreds_to_insert = shreds.drain(..step).collect_vec(); let shreds_to_insert = shreds.drain(..step).collect_vec();
blocktree_ blockstore_
.insert_shreds(shreds_to_insert, None, false) .insert_shreds(shreds_to_insert, None, false)
.unwrap(); .unwrap();
sleep(Duration::from_millis(repair_interval_ms)); sleep(Duration::from_millis(repair_interval_ms));
@ -748,7 +748,7 @@ mod test {
RepairService::update_epoch_slots( RepairService::update_epoch_slots(
Pubkey::default(), Pubkey::default(),
root, root,
blocktree.lowest_slot(), blockstore.lowest_slot(),
&mut root.clone(), &mut root.clone(),
&mut completed_slots, &mut completed_slots,
&cluster_info, &cluster_info,
@ -762,7 +762,7 @@ mod test {
// Update with new root, should filter out the slots <= root // Update with new root, should filter out the slots <= root
root = num_slots / 2; root = num_slots / 2;
let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot); let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
RepairService::update_epoch_slots( RepairService::update_epoch_slots(
Pubkey::default(), Pubkey::default(),
root, root,
@ -777,7 +777,7 @@ mod test {
assert_eq!(completed_slots, expected); assert_eq!(completed_slots, expected);
writer.join().unwrap(); writer.join().unwrap();
} }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
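// A hedged sketch of open_with_signal as used in the test above; the middle
// receiver is elided by the test, so it is named here after ReplayStage::new's
// ledger_signal_receiver parameter (an assumption, not from this diff):
let (blockstore, _ledger_signal_receiver, completed_slots_receiver) =
    Blockstore::open_with_signal(&blockstore_path).unwrap();
let blockstore = std::sync::Arc::new(blockstore);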
#[test] #[test]


@ -12,8 +12,8 @@ use solana_ledger::entry::EntryVerificationStatus;
use solana_ledger::{ use solana_ledger::{
bank_forks::BankForks, bank_forks::BankForks,
block_error::BlockError, block_error::BlockError,
blocktree::{Blocktree, BlocktreeError}, blockstore::{Blockstore, BlockstoreError},
blocktree_processor::{self, TransactionStatusSender}, blockstore_processor::{self, TransactionStatusSender},
entry::{Entry, EntrySlice, VerifyRecyclers}, entry::{Entry, EntrySlice, VerifyRecyclers},
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
snapshot_package::SnapshotPackageSender, snapshot_package::SnapshotPackageSender,
@ -180,7 +180,7 @@ impl ReplayStage {
#[allow(clippy::new_ret_no_self)] #[allow(clippy::new_ret_no_self)]
pub fn new( pub fn new(
config: ReplayStageConfig, config: ReplayStageConfig,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>, bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
ledger_signal_receiver: Receiver<bool>, ledger_signal_receiver: Receiver<bool>,
@ -237,7 +237,7 @@ impl ReplayStage {
let start = allocated.get(); let start = allocated.get();
Self::generate_new_bank_forks( Self::generate_new_bank_forks(
&blocktree, &blockstore,
&bank_forks, &bank_forks,
&leader_schedule_cache, &leader_schedule_cache,
&subscriptions, &subscriptions,
@ -255,7 +255,7 @@ impl ReplayStage {
let start = allocated.get(); let start = allocated.get();
let did_complete_bank = Self::replay_active_banks( let did_complete_bank = Self::replay_active_banks(
&blocktree, &blockstore,
&bank_forks, &bank_forks,
&my_pubkey, &my_pubkey,
&mut progress, &mut progress,
@ -311,7 +311,7 @@ impl ReplayStage {
&vote_account, &vote_account,
&voting_keypair, &voting_keypair,
&cluster_info, &cluster_info,
&blocktree, &blockstore,
&leader_schedule_cache, &leader_schedule_cache,
&root_bank_sender, &root_bank_sender,
stats.total_staked, stats.total_staked,
@ -328,7 +328,7 @@ impl ReplayStage {
if last_reset != bank.last_blockhash() { if last_reset != bank.last_blockhash() {
Self::reset_poh_recorder( Self::reset_poh_recorder(
&my_pubkey, &my_pubkey,
&blocktree, &blockstore,
&bank, &bank,
&poh_recorder, &poh_recorder,
&leader_schedule_cache, &leader_schedule_cache,
@ -409,7 +409,7 @@ impl ReplayStage {
match result { match result {
Err(RecvTimeoutError::Timeout) => continue, Err(RecvTimeoutError::Timeout) => continue,
Err(_) => break, Err(_) => break,
Ok(_) => trace!("blocktree signal"), Ok(_) => trace!("blockstore signal"),
}; };
} }
Ok(()) Ok(())
@ -535,16 +535,16 @@ impl ReplayStage {
!Bank::can_commit(&tx_error) !Bank::can_commit(&tx_error)
} }
Err(Error::BlockError(_)) => true, Err(Error::BlockError(_)) => true,
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true, Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_))) => true,
Err(Error::BlocktreeError(BlocktreeError::DeadSlot)) => true, Err(Error::BlockstoreError(BlockstoreError::DeadSlot)) => true,
_ => false, _ => false,
} }
} }
// Returns the replay result and the number of replayed transactions // Returns the replay result and the number of replayed transactions
fn replay_blocktree_into_bank( fn replay_blockstore_into_bank(
bank: &Arc<Bank>, bank: &Arc<Bank>,
blocktree: &Blocktree, blockstore: &Blockstore,
bank_progress: &mut ForkProgress, bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>, transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers, verify_recyclers: &VerifyRecyclers,
@ -552,7 +552,7 @@ impl ReplayStage {
let mut tx_count = 0; let mut tx_count = 0;
let now = Instant::now(); let now = Instant::now();
let load_result = let load_result =
Self::load_blocktree_entries_with_shred_info(bank, blocktree, bank_progress); Self::load_blockstore_entries_with_shred_info(bank, blockstore, bank_progress);
let fetch_entries_elapsed = now.elapsed().as_micros(); let fetch_entries_elapsed = now.elapsed().as_micros();
if load_result.is_err() { if load_result.is_err() {
bank_progress.stats.fetch_entries_fail_elapsed += fetch_entries_elapsed as u64; bank_progress.stats.fetch_entries_fail_elapsed += fetch_entries_elapsed as u64;
@ -591,17 +591,17 @@ impl ReplayStage {
("error", format!("error: {:?}", replay_result), String), ("error", format!("error: {:?}", replay_result), String),
("slot", bank.slot(), i64) ("slot", bank.slot(), i64)
); );
Self::mark_dead_slot(bank.slot(), blocktree, bank_progress); Self::mark_dead_slot(bank.slot(), blockstore, bank_progress);
} }
(replay_result, tx_count) (replay_result, tx_count)
} }
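// Shape of the renamed replay path, per the body above:
// load_blockstore_entries_with_shred_info() pulls the slot's entries plus
// shred bookkeeping, fetch failures are charged to
// stats.fetch_entries_fail_elapsed, and a fatal replay error falls through
// to mark_dead_slot() before the (result, tx_count) pair is returned.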
fn mark_dead_slot(slot: Slot, blocktree: &Blocktree, bank_progress: &mut ForkProgress) { fn mark_dead_slot(slot: Slot, blockstore: &Blockstore, bank_progress: &mut ForkProgress) {
bank_progress.is_dead = true; bank_progress.is_dead = true;
blocktree blockstore
.set_dead_slot(slot) .set_dead_slot(slot)
.expect("Failed to mark slot as dead in blocktree"); .expect("Failed to mark slot as dead in blockstore");
} }
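// mark_dead_slot records the flag in the store itself via set_dead_slot;
// the dead-slot test further below verifies exactly this through
// blockstore.is_dead(bank0.slot()).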
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -613,7 +613,7 @@ impl ReplayStage {
vote_account: &Pubkey, vote_account: &Pubkey,
voting_keypair: &Option<Arc<Keypair>>, voting_keypair: &Option<Arc<Keypair>>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
root_bank_sender: &Sender<Vec<Arc<Bank>>>, root_bank_sender: &Sender<Vec<Arc<Bank>>>,
total_staked: u64, total_staked: u64,
@ -637,12 +637,12 @@ impl ReplayStage {
let mut rooted_banks = root_bank.parents(); let mut rooted_banks = root_bank.parents();
rooted_banks.push(root_bank); rooted_banks.push(root_bank);
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect(); let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
// Call leader schedule_cache.set_root() before blocktree.set_root() because // Call leader schedule_cache.set_root() before blockstore.set_root() because
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to // bank_forks.root is consumed by repair_service to update gossip, so we don't want to
// get shreds for repair on gossip before we update leader schedule, otherwise they may // get shreds for repair on gossip before we update leader schedule, otherwise they may
// get dropped. // get dropped.
leader_schedule_cache.set_root(rooted_banks.last().unwrap()); leader_schedule_cache.set_root(rooted_banks.last().unwrap());
blocktree blockstore
.set_roots(&rooted_slots) .set_roots(&rooted_slots)
.expect("Ledger set roots failed"); .expect("Ledger set roots failed");
bank_forks bank_forks
@ -699,13 +699,17 @@ impl ReplayStage {
fn reset_poh_recorder( fn reset_poh_recorder(
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
blocktree: &Blocktree, blockstore: &Blockstore,
bank: &Arc<Bank>, bank: &Arc<Bank>,
poh_recorder: &Arc<Mutex<PohRecorder>>, poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
) { ) {
let next_leader_slot = let next_leader_slot = leader_schedule_cache.next_leader_slot(
leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree)); &my_pubkey,
bank.slot(),
&bank,
Some(blockstore),
);
poh_recorder poh_recorder
.lock() .lock()
.unwrap() .unwrap()
@ -727,7 +731,7 @@ impl ReplayStage {
} }
fn replay_active_banks( fn replay_active_banks(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
bank_forks: &Arc<RwLock<BankForks>>, bank_forks: &Arc<RwLock<BankForks>>,
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
progress: &mut HashMap<u64, ForkProgress>, progress: &mut HashMap<u64, ForkProgress>,
@ -756,9 +760,9 @@ impl ReplayStage {
.entry(bank.slot()) .entry(bank.slot())
.or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash())); .or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash()));
if bank.collector_id() != my_pubkey { if bank.collector_id() != my_pubkey {
let (replay_result, replay_tx_count) = Self::replay_blocktree_into_bank( let (replay_result, replay_tx_count) = Self::replay_blockstore_into_bank(
&bank, &bank,
&blocktree, &blockstore,
bank_progress, bank_progress,
transaction_status_sender.clone(), transaction_status_sender.clone(),
verify_recyclers, verify_recyclers,
@ -959,12 +963,12 @@ impl ReplayStage {
} }
} }
fn load_blocktree_entries_with_shred_info( fn load_blockstore_entries_with_shred_info(
bank: &Bank, bank: &Bank,
blocktree: &Blocktree, blockstore: &Blockstore,
bank_progress: &mut ForkProgress, bank_progress: &mut ForkProgress,
) -> Result<(Vec<Entry>, usize, bool)> { ) -> Result<(Vec<Entry>, usize, bool)> {
blocktree blockstore
.get_slot_entries_with_shred_info(bank.slot(), bank_progress.num_shreds as u64) .get_slot_entries_with_shred_info(bank.slot(), bank_progress.num_shreds as u64)
.map_err(|err| err.into()) .map_err(|err| err.into())
} }
@ -1078,7 +1082,7 @@ impl ReplayStage {
let mut replay_elapsed = Measure::start("replay_elapsed"); let mut replay_elapsed = Measure::start("replay_elapsed");
let res = let res =
blocktree_processor::process_entries(bank, entries, true, transaction_status_sender); blockstore_processor::process_entries(bank, entries, true, transaction_status_sender);
replay_elapsed.stop(); replay_elapsed.stop();
bank_progress.stats.replay_elapsed += replay_elapsed.as_us(); bank_progress.stats.replay_elapsed += replay_elapsed.as_us();
@ -1116,7 +1120,7 @@ impl ReplayStage {
} }
fn generate_new_bank_forks( fn generate_new_bank_forks(
blocktree: &Blocktree, blockstore: &Blockstore,
forks_lock: &RwLock<BankForks>, forks_lock: &RwLock<BankForks>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>, subscriptions: &Arc<RpcSubscriptions>,
@ -1125,7 +1129,7 @@ impl ReplayStage {
let forks = forks_lock.read().unwrap(); let forks = forks_lock.read().unwrap();
let frozen_banks = forks.frozen_banks(); let frozen_banks = forks.frozen_banks();
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect(); let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
let next_slots = blocktree let next_slots = blockstore
.get_slots_since(&frozen_bank_slots) .get_slots_since(&frozen_bank_slots)
.expect("Db error"); .expect("Db error");
// Filter out what we've already seen // Filter out what we've already seen
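// Fork discovery in brief (a hedged reading of the code above): the frozen
// bank slots are the known tips, blockstore.get_slots_since(&frozen_bank_slots)
// reports any child slots the ledger has received since, and each unseen
// child becomes a new bank (see test_child_slots_of_same_parent below).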
@ -1188,8 +1192,8 @@ pub(crate) mod tests {
use crossbeam_channel::unbounded; use crossbeam_channel::unbounded;
use solana_client::rpc_request::RpcEncodedTransaction; use solana_client::rpc_request::RpcEncodedTransaction;
use solana_ledger::{ use solana_ledger::{
blocktree::make_slot_entries, blockstore::make_slot_entries,
blocktree::{entries_to_test_shreds, BlocktreeError}, blockstore::{entries_to_test_shreds, BlockstoreError},
create_new_tmp_ledger, create_new_tmp_ledger,
entry::{self, next_entry}, entry::{self, next_entry},
get_tmp_ledger_path, get_tmp_ledger_path,
@ -1499,8 +1503,9 @@ pub(crate) mod tests {
fn test_child_slots_of_same_parent() { fn test_child_slots_of_same_parent() {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
); );
let genesis_config = create_genesis_config(10_000).genesis_config; let genesis_config = create_genesis_config(10_000).genesis_config;
@ -1512,11 +1517,11 @@ pub(crate) mod tests {
// Insert shred for slot 1, generate new forks, check result // Insert shred for slot 1, generate new forks, check result
let (shreds, _) = make_slot_entries(1, 0, 8); let (shreds, _) = make_slot_entries(1, 0, 8);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.get(1).is_none()); assert!(bank_forks.get(1).is_none());
let bank_forks = RwLock::new(bank_forks); let bank_forks = RwLock::new(bank_forks);
ReplayStage::generate_new_bank_forks( ReplayStage::generate_new_bank_forks(
&blocktree, &blockstore,
&bank_forks, &bank_forks,
&leader_schedule_cache, &leader_schedule_cache,
&subscriptions, &subscriptions,
@ -1525,10 +1530,10 @@ pub(crate) mod tests {
// Insert shred for slot 2, generate new forks, check result // Insert shred for slot 2, generate new forks, check result
let (shreds, _) = make_slot_entries(2, 0, 8); let (shreds, _) = make_slot_entries(2, 0, 8);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.read().unwrap().get(2).is_none()); assert!(bank_forks.read().unwrap().get(2).is_none());
ReplayStage::generate_new_bank_forks( ReplayStage::generate_new_bank_forks(
&blocktree, &blockstore,
&bank_forks, &bank_forks,
&leader_schedule_cache, &leader_schedule_cache,
&subscriptions, &subscriptions,
@ -1750,7 +1755,7 @@ pub(crate) mod tests {
assert_matches!( assert_matches!(
res, res,
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_)))
); );
} }
@ -1762,8 +1767,9 @@ pub(crate) mod tests {
{ {
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let res = { let res = {
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
); );
let GenesisConfigInfo { let GenesisConfigInfo {
mut genesis_config, mut genesis_config,
@ -1778,10 +1784,10 @@ pub(crate) mod tests {
.entry(bank0.slot()) .entry(bank0.slot())
.or_insert_with(|| ForkProgress::new(0, last_blockhash)); .or_insert_with(|| ForkProgress::new(0, last_blockhash));
let shreds = shred_to_insert(&mint_keypair, bank0.clone()); let shreds = shred_to_insert(&mint_keypair, bank0.clone());
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
let (res, _tx_count) = ReplayStage::replay_blocktree_into_bank( let (res, _tx_count) = ReplayStage::replay_blockstore_into_bank(
&bank0, &bank0,
&blocktree, &blockstore,
&mut bank0_progress, &mut bank0_progress,
None, None,
&VerifyRecyclers::default(), &VerifyRecyclers::default(),
@ -1793,8 +1799,8 @@ pub(crate) mod tests {
.map(|b| b.is_dead) .map(|b| b.is_dead)
.unwrap_or(false)); .unwrap_or(false));
// Check that the erroring bank was marked as dead in blocktree // Check that the erroring bank was marked as dead in blockstore
assert!(blocktree.is_dead(bank0.slot())); assert!(blockstore.is_dead(bank0.slot()));
res res
}; };
let _ignored = remove_dir_all(&ledger_path); let _ignored = remove_dir_all(&ledger_path);
@ -1902,11 +1908,11 @@ pub(crate) mod tests {
); );
} }
pub fn create_test_transactions_and_populate_blocktree( pub fn create_test_transactions_and_populate_blockstore(
keypairs: Vec<&Keypair>, keypairs: Vec<&Keypair>,
previous_slot: Slot, previous_slot: Slot,
bank: Arc<Bank>, bank: Arc<Bank>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
) -> Vec<Signature> { ) -> Vec<Signature> {
let mint_keypair = keypairs[0]; let mint_keypair = keypairs[0];
let keypair1 = keypairs[1]; let keypair1 = keypairs[1];
@ -1933,19 +1939,19 @@ pub(crate) mod tests {
let entries = vec![entry_1, entry_2, entry_3]; let entries = vec![entry_1, entry_2, entry_3];
let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0); let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[slot]).unwrap(); blockstore.set_roots(&[slot]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded(); let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new( let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver, transaction_status_receiver,
blocktree.clone(), blockstore.clone(),
&Arc::new(AtomicBool::new(false)), &Arc::new(AtomicBool::new(false)),
); );
// Check that process_entries successfully writes can_commit transactions statuses, and // Check that process_entries successfully writes can_commit transactions statuses, and
// that they are matched properly by get_confirmed_block // that they are matched properly by get_confirmed_block
let _result = blocktree_processor::process_entries( let _result = blockstore_processor::process_entries(
&bank, &bank,
&entries, &entries,
true, true,
@ -1966,9 +1972,9 @@ pub(crate) mod tests {
} = create_genesis_config(1000); } = create_genesis_config(1000);
let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config);
{ {
let blocktree = Blocktree::open(&ledger_path) let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to successfully open database ledger"); .expect("Expected to successfully open database ledger");
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let keypair1 = Keypair::new(); let keypair1 = Keypair::new();
let keypair2 = Keypair::new(); let keypair2 = Keypair::new();
@ -1982,14 +1988,14 @@ pub(crate) mod tests {
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot = bank1.slot(); let slot = bank1.slot();
let signatures = create_test_transactions_and_populate_blocktree( let signatures = create_test_transactions_and_populate_blockstore(
vec![&mint_keypair, &keypair1, &keypair2, &keypair3], vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
bank0.slot(), bank0.slot(),
bank1, bank1,
blocktree.clone(), blockstore.clone(),
); );
let confirmed_block = blocktree.get_confirmed_block(slot, None).unwrap(); let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3); assert_eq!(confirmed_block.transactions.len(), 3);
for (transaction, result) in confirmed_block.transactions.into_iter() { for (transaction, result) in confirmed_block.transactions.into_iter() {
@ -2010,6 +2016,6 @@ pub(crate) mod tests {
} }
} }
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
} }


@ -3,7 +3,7 @@
use crate::cluster_info; use crate::cluster_info;
use crate::poh_recorder; use crate::poh_recorder;
use solana_ledger::block_error; use solana_ledger::block_error;
use solana_ledger::blocktree; use solana_ledger::blockstore;
use solana_ledger::snapshot_utils; use solana_ledger::snapshot_utils;
use solana_sdk::transaction; use solana_sdk::transaction;
use std::any::Any; use std::any::Any;
@ -27,7 +27,7 @@ pub enum Error {
SendError, SendError,
PohRecorderError(poh_recorder::PohRecorderError), PohRecorderError(poh_recorder::PohRecorderError),
BlockError(block_error::BlockError), BlockError(block_error::BlockError),
BlocktreeError(blocktree::BlocktreeError), BlockstoreError(blockstore::BlockstoreError),
FsExtra(fs_extra::error::Error), FsExtra(fs_extra::error::Error),
SnapshotError(snapshot_utils::SnapshotError), SnapshotError(snapshot_utils::SnapshotError),
} }
@ -127,9 +127,9 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
Error::PohRecorderError(e) Error::PohRecorderError(e)
} }
} }
impl std::convert::From<blocktree::BlocktreeError> for Error { impl std::convert::From<blockstore::BlockstoreError> for Error {
fn from(e: blocktree::BlocktreeError) -> Error { fn from(e: blockstore::BlockstoreError) -> Error {
Error::BlocktreeError(e) Error::BlockstoreError(e)
} }
} }
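// With the From impl above, `?` lifts a BlockstoreError into this crate's
// Error. A minimal sketch -- the helper name is hypothetical and the
// crate-local Result<T> alias over Error is assumed:
use solana_ledger::blockstore::Blockstore;

fn _set_roots_example(blockstore: &Blockstore, slots: &[u64]) -> Result<()> {
    blockstore.set_roots(slots)?; // BlockstoreError -> Error via the impl above
    Ok(())
}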
impl std::convert::From<snapshot_utils::SnapshotError> for Error { impl std::convert::From<snapshot_utils::SnapshotError> for Error {


@ -12,7 +12,7 @@ use crate::{
use crossbeam_channel::Receiver as CrossbeamReceiver; use crossbeam_channel::Receiver as CrossbeamReceiver;
use solana_ledger::{ use solana_ledger::{
bank_forks::BankForks, bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver}, blockstore::{Blockstore, CompletedSlotsReceiver},
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
staking_utils, staking_utils,
}; };
@ -205,7 +205,7 @@ impl RetransmitStage {
pub fn new( pub fn new(
bank_forks: Arc<RwLock<BankForks>>, bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
retransmit_sockets: Arc<Vec<UdpSocket>>, retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>, repair_socket: Arc<UdpSocket>,
@ -234,7 +234,7 @@ impl RetransmitStage {
}; };
let leader_schedule_cache = leader_schedule_cache.clone(); let leader_schedule_cache = leader_schedule_cache.clone();
let window_service = WindowService::new( let window_service = WindowService::new(
blocktree, blockstore,
cluster_info.clone(), cluster_info.clone(),
verified_receiver, verified_receiver,
retransmit_sender, retransmit_sender,
@ -281,7 +281,7 @@ mod tests {
use crate::contact_info::ContactInfo; use crate::contact_info::ContactInfo;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::packet::{self, Meta, Packet, Packets}; use crate::packet::{self, Meta, Packet, Packets};
use solana_ledger::blocktree_processor::{process_blocktree, ProcessOptions}; use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
use solana_ledger::create_new_tmp_ledger; use solana_ledger::create_new_tmp_ledger;
use solana_net_utils::find_available_port_in_range; use solana_net_utils::find_available_port_in_range;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
@ -290,13 +290,13 @@ mod tests {
fn test_skip_repair() { fn test_skip_repair() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions { let opts = ProcessOptions {
full_leader_cache: true, full_leader_cache: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (bank_forks, _, cached_leader_schedule) = let (bank_forks, _, cached_leader_schedule) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule); let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks)); let bank_forks = Arc::new(RwLock::new(bank_forks));

View File

@ -18,7 +18,7 @@ use solana_client::rpc_request::{
}; };
use solana_faucet::faucet::request_airdrop_transaction; use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{ use solana_ledger::{
bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator, bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
}; };
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::{ use solana_sdk::{
@ -69,7 +69,7 @@ impl Default for JsonRpcConfig {
pub struct JsonRpcRequestProcessor { pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>, bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
config: JsonRpcConfig, config: JsonRpcConfig,
storage_state: StorageState, storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>, validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
@ -94,7 +94,7 @@ impl JsonRpcRequestProcessor {
config: JsonRpcConfig, config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>, bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
storage_state: StorageState, storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>, validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
) -> Self { ) -> Self {
@ -102,7 +102,7 @@ impl JsonRpcRequestProcessor {
config, config,
bank_forks, bank_forks,
block_commitment_cache, block_commitment_cache,
blocktree, blockstore,
storage_state, storage_state,
validator_exit, validator_exit,
} }
@ -318,7 +318,7 @@ impl JsonRpcRequestProcessor {
slot: Slot, slot: Slot,
encoding: Option<RpcTransactionEncoding>, encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> { ) -> Result<Option<RpcConfirmedBlock>> {
Ok(self.blocktree.get_confirmed_block(slot, encoding).ok()) Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
} }
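The `.ok()` above deliberately flattens storage errors: a block that cannot be loaded comes back as None, which the RPC layer serializes as null instead of surfacing an internal error. A generic, self-contained illustration of that Result-to-Option flattening (placeholder types, not the actual RPC structs):

// Any Err collapses to None; only a successful load survives.
fn block_or_null<T, E>(res: Result<T, E>) -> Option<T> {
    res.ok()
}

fn main() {
    assert_eq!(block_or_null::<_, ()>(Ok("block")), Some("block"));
    assert_eq!(block_or_null::<&str, _>(Err("missing shreds")), None);
}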
pub fn get_confirmed_blocks( pub fn get_confirmed_blocks(
@ -331,9 +331,9 @@ impl JsonRpcRequestProcessor {
return Ok(vec![]); return Ok(vec![]);
} }
let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot)); let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot));
if let Some(start_slot) = start_slot { if let Some(start_slot) = start_slot {
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blocktree) let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blockstore)
.unwrap() .unwrap()
.map(|(slot, _)| slot) .map(|(slot, _)| slot)
.collect(); .collect();
@ -349,14 +349,14 @@ impl JsonRpcRequestProcessor {
// genesis (i.e. that this bank's slots_per_year will be applicable to any rooted slot being // genesis (i.e. that this bank's slots_per_year will be applicable to any rooted slot being
// queried). If these values will be variable in the future, those timing parameters will // queried). If these values will be variable in the future, those timing parameters will
// need to be stored persistently, and the slot_duration calculation will likely need to be // need to be stored persistently, and the slot_duration calculation will likely need to be
// moved upstream into blocktree. Also, an explicit commitment level will need to be set. // moved upstream into blockstore. Also, an explicit commitment level will need to be set.
let bank = self.bank(None); let bank = self.bank(None);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year()); let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(slot); let epoch = bank.epoch_schedule().get_epoch(slot);
let stakes = HashMap::new(); let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes); let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
Ok(self.blocktree.get_block_time(slot, slot_duration, stakes)) Ok(self.blockstore.get_block_time(slot, slot_duration, stakes))
} }
} }
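For context on the comment above: the block-time estimate leans on a single slot duration derived from the bank's slots_per_year. A back-of-the-envelope re-derivation of slot_duration_from_slots_per_year, under the assumption that it simply spreads one year of wall time across the expected slots (the constants and rounding here are illustrative; the real solana_sdk helper may differ):

use std::time::Duration;

// Sketch: one (assumed 365.25-day) year divided evenly across the slots.
fn approx_slot_duration(slots_per_year: f64) -> Duration {
    let secs_per_year = 365.25 * 24.0 * 60.0 * 60.0;
    Duration::from_secs_f64(secs_per_year / slots_per_year)
}

fn main() {
    // ~78.9M slots per year corresponds to a ~400ms slot.
    println!("{:?}", approx_slot_duration(78_894_000.0));
}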
@ -1068,13 +1068,13 @@ pub mod tests {
use crate::{ use crate::{
contact_info::ContactInfo, contact_info::ContactInfo,
genesis_utils::{create_genesis_config, GenesisConfigInfo}, genesis_utils::{create_genesis_config, GenesisConfigInfo},
replay_stage::tests::create_test_transactions_and_populate_blocktree, replay_stage::tests::create_test_transactions_and_populate_blockstore,
}; };
use bincode::deserialize; use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value}; use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use solana_client::rpc_request::RpcEncodedTransaction; use solana_client::rpc_request::RpcEncodedTransaction;
use solana_ledger::{ use solana_ledger::{
blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks, blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path, entry::next_entry_mut, get_tmp_ledger_path,
}; };
use solana_sdk::{ use solana_sdk::{
@ -1112,12 +1112,12 @@ pub mod tests {
} }
fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler { fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
start_rpc_handler_with_tx_and_blocktree(pubkey, vec![], 0) start_rpc_handler_with_tx_and_blockstore(pubkey, vec![], 0)
} }
fn start_rpc_handler_with_tx_and_blocktree( fn start_rpc_handler_with_tx_and_blockstore(
pubkey: &Pubkey, pubkey: &Pubkey,
blocktree_roots: Vec<Slot>, blockstore_roots: Vec<Slot>,
default_timestamp: i64, default_timestamp: i64,
) -> RpcHandler { ) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks(); let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
@ -1135,21 +1135,21 @@ pub mod tests {
let block_commitment_cache = let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42))); Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let keypair1 = Keypair::new(); let keypair1 = Keypair::new();
let keypair2 = Keypair::new(); let keypair2 = Keypair::new();
let keypair3 = Keypair::new(); let keypair3 = Keypair::new();
bank.transfer(4, &alice, &keypair2.pubkey()).unwrap(); bank.transfer(4, &alice, &keypair2.pubkey()).unwrap();
let confirmed_block_signatures = create_test_transactions_and_populate_blocktree( let confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3], vec![&alice, &keypair1, &keypair2, &keypair3],
0, 0,
bank.clone(), bank.clone(),
blocktree.clone(), blockstore.clone(),
); );
// Add timestamp vote to blocktree // Add timestamp vote to blockstore
let vote = Vote { let vote = Vote {
slots: vec![1], slots: vec![1],
hash: Hash::default(), hash: Hash::default(),
@ -1172,10 +1172,10 @@ pub mod tests {
true, true,
0, 0,
); );
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[1]).unwrap(); blockstore.set_roots(&[1]).unwrap();
let mut roots = blocktree_roots.clone(); let mut roots = blockstore_roots.clone();
if !roots.is_empty() { if !roots.is_empty() {
roots.retain(|&x| x > 1); roots.retain(|&x| x > 1);
let mut parent_bank = bank; let mut parent_bank = bank;
@ -1186,9 +1186,9 @@ pub mod tests {
parent_bank.squash(); parent_bank.squash();
bank_forks.write().unwrap().set_root(*root, &None); bank_forks.write().unwrap().set_root(*root, &None);
let parent = if i > 0 { roots[i - 1] } else { 1 }; let parent = if i > 0 { roots[i - 1] } else { 1 };
fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default()); fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default());
} }
blocktree.set_roots(&roots).unwrap(); blockstore.set_roots(&roots).unwrap();
let new_bank = Bank::new_from_parent( let new_bank = Bank::new_from_parent(
&parent_bank, &parent_bank,
parent_bank.collector_id(), parent_bank.collector_id(),
@ -1214,7 +1214,7 @@ pub mod tests {
JsonRpcConfig::default(), JsonRpcConfig::default(),
bank_forks.clone(), bank_forks.clone(),
block_commitment_cache.clone(), block_commitment_cache.clone(),
blocktree, blockstore,
StorageState::default(), StorageState::default(),
validator_exit, validator_exit,
))); )));
@ -1261,12 +1261,12 @@ pub mod tests {
let bank = bank_forks.read().unwrap().working_bank(); let bank = bank_forks.read().unwrap().working_bank();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let request_processor = JsonRpcRequestProcessor::new( let request_processor = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(), JsonRpcConfig::default(),
bank_forks, bank_forks,
block_commitment_cache, block_commitment_cache,
Arc::new(blocktree), Arc::new(blockstore),
StorageState::default(), StorageState::default(),
validator_exit, validator_exit,
); );
@ -1752,7 +1752,7 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit); let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut io = MetaIoHandler::default(); let mut io = MetaIoHandler::default();
let rpc = RpcSolImpl; let rpc = RpcSolImpl;
@ -1763,7 +1763,7 @@ pub mod tests {
JsonRpcConfig::default(), JsonRpcConfig::default(),
new_bank_forks().0, new_bank_forks().0,
block_commitment_cache, block_commitment_cache,
Arc::new(blocktree), Arc::new(blockstore),
StorageState::default(), StorageState::default(),
validator_exit, validator_exit,
); );
@ -1856,12 +1856,12 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit); let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let request_processor = JsonRpcRequestProcessor::new( let request_processor = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(), JsonRpcConfig::default(),
new_bank_forks().0, new_bank_forks().0,
block_commitment_cache, block_commitment_cache,
Arc::new(blocktree), Arc::new(blockstore),
StorageState::default(), StorageState::default(),
validator_exit, validator_exit,
); );
@ -1875,14 +1875,14 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit); let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut config = JsonRpcConfig::default(); let mut config = JsonRpcConfig::default();
config.enable_validator_exit = true; config.enable_validator_exit = true;
let request_processor = JsonRpcRequestProcessor::new( let request_processor = JsonRpcRequestProcessor::new(
config, config,
new_bank_forks().0, new_bank_forks().0,
block_commitment_cache, block_commitment_cache,
Arc::new(blocktree), Arc::new(blockstore),
StorageState::default(), StorageState::default(),
validator_exit, validator_exit,
); );
@ -1927,7 +1927,7 @@ pub mod tests {
let block_commitment_cache = let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42))); Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut config = JsonRpcConfig::default(); let mut config = JsonRpcConfig::default();
config.enable_validator_exit = true; config.enable_validator_exit = true;
@ -1935,7 +1935,7 @@ pub mod tests {
config, config,
new_bank_forks().0, new_bank_forks().0,
block_commitment_cache, block_commitment_cache,
Arc::new(blocktree), Arc::new(blockstore),
StorageState::default(), StorageState::default(),
validator_exit, validator_exit,
); );
@ -2082,7 +2082,7 @@ pub mod tests {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let roots = vec![0, 1, 3, 4, 8]; let roots = vec![0, 1, 3, 4, 8];
let RpcHandler { io, meta, .. } = let RpcHandler { io, meta, .. } =
start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone(), 0); start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone(), 0);
let req = let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#); format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
@ -2129,7 +2129,7 @@ pub mod tests {
fn test_get_block_time() { fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let base_timestamp = 1576183541; let base_timestamp = 1576183541;
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blocktree( let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blockstore(
&bob_pubkey, &bob_pubkey,
vec![1, 2, 3, 4, 5, 6, 7], vec![1, 2, 3, 4, 5, 6, 7],
base_timestamp, base_timestamp,

View File

@ -9,7 +9,7 @@ use jsonrpc_http_server::{
hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware, hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware,
RequestMiddlewareAction, ServerBuilder, RequestMiddlewareAction, ServerBuilder,
}; };
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree}; use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use std::{ use std::{
net::SocketAddr, net::SocketAddr,
@ -91,7 +91,7 @@ impl JsonRpcService {
config: JsonRpcConfig, config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>, bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
genesis_hash: Hash, genesis_hash: Hash,
ledger_path: &Path, ledger_path: &Path,
@ -104,7 +104,7 @@ impl JsonRpcService {
config, config,
bank_forks, bank_forks,
block_commitment_cache, block_commitment_cache,
blocktree, blockstore,
storage_state, storage_state,
validator_exit.clone(), validator_exit.clone(),
))); )));
@ -204,13 +204,13 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank))); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut rpc_service = JsonRpcService::new( let mut rpc_service = JsonRpcService::new(
rpc_addr, rpc_addr,
JsonRpcConfig::default(), JsonRpcConfig::default(),
bank_forks, bank_forks,
block_commitment_cache, block_commitment_cache,
Arc::new(blocktree), Arc::new(blockstore),
cluster_info, cluster_info,
Hash::default(), Hash::default(),
&PathBuf::from("farf"), &PathBuf::from("farf"),

View File

@ -2,7 +2,7 @@
use crate::packet::{Packet, PacketsRecycler}; use crate::packet::{Packet, PacketsRecycler};
use crate::streamer::{self, PacketReceiver, PacketSender}; use crate::streamer::{self, PacketReceiver, PacketSender};
use solana_ledger::blocktree::MAX_DATA_SHREDS_PER_SLOT; use solana_ledger::blockstore::MAX_DATA_SHREDS_PER_SLOT;
use solana_ledger::shred::{OFFSET_OF_SHRED_INDEX, SIZE_OF_SHRED_INDEX}; use solana_ledger::shred::{OFFSET_OF_SHRED_INDEX, SIZE_OF_SHRED_INDEX};
use solana_perf::cuda_runtime::PinnedVec; use solana_perf::cuda_runtime::PinnedVec;
use solana_perf::packet::limited_deserialize; use solana_perf::packet::limited_deserialize;

View File

@ -10,7 +10,7 @@ use crate::{
}; };
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng; use rand_chacha::ChaChaRng;
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree}; use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts}; use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
use solana_sdk::{ use solana_sdk::{
account::Account, account::Account,
@ -177,7 +177,7 @@ impl StorageStage {
pub fn new( pub fn new(
storage_state: &StorageState, storage_state: &StorageState,
bank_receiver: Receiver<Vec<Arc<Bank>>>, bank_receiver: Receiver<Vec<Arc<Bank>>>,
blocktree: Option<Arc<Blocktree>>, blockstore: Option<Arc<Blockstore>>,
keypair: &Arc<Keypair>, keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>, storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
@ -197,12 +197,12 @@ impl StorageStage {
let mut current_key = 0; let mut current_key = 0;
let mut storage_slots = StorageSlots::default(); let mut storage_slots = StorageSlots::default();
loop { loop {
if let Some(ref some_blocktree) = blocktree { if let Some(ref some_blockstore) = blockstore {
if let Err(e) = Self::process_entries( if let Err(e) = Self::process_entries(
&storage_keypair, &storage_keypair,
&storage_state_inner, &storage_state_inner,
&bank_receiver, &bank_receiver,
&some_blocktree, &some_blockstore,
&mut storage_slots, &mut storage_slots,
&mut current_key, &mut current_key,
slots_per_turn, slots_per_turn,
@ -368,7 +368,7 @@ impl StorageStage {
fn process_turn( fn process_turn(
storage_keypair: &Arc<Keypair>, storage_keypair: &Arc<Keypair>,
state: &Arc<RwLock<StorageStateInner>>, state: &Arc<RwLock<StorageStateInner>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
blockhash: Hash, blockhash: Hash,
slot: Slot, slot: Slot,
slots_per_segment: u64, slots_per_segment: u64,
@ -431,7 +431,7 @@ impl StorageStage {
let mut statew = state.write().unwrap(); let mut statew = state.write().unwrap();
match chacha_cbc_encrypt_file_many_keys( match chacha_cbc_encrypt_file_many_keys(
blocktree, blockstore,
segment as u64, segment as u64,
statew.slots_per_segment, statew.slots_per_segment,
&mut statew.storage_keys, &mut statew.storage_keys,
@ -502,7 +502,7 @@ impl StorageStage {
storage_keypair: &Arc<Keypair>, storage_keypair: &Arc<Keypair>,
storage_state: &Arc<RwLock<StorageStateInner>>, storage_state: &Arc<RwLock<StorageStateInner>>,
bank_receiver: &Receiver<Vec<Arc<Bank>>>, bank_receiver: &Receiver<Vec<Arc<Bank>>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
storage_slots: &mut StorageSlots, storage_slots: &mut StorageSlots,
current_key_idx: &mut usize, current_key_idx: &mut usize,
slots_per_turn: u64, slots_per_turn: u64,
@ -541,7 +541,7 @@ impl StorageStage {
let _ignored = Self::process_turn( let _ignored = Self::process_turn(
&storage_keypair, &storage_keypair,
&storage_state, &storage_state,
&blocktree, &blockstore,
bank.last_blockhash(), bank.last_blockhash(),
bank.slot(), bank.slot(),
bank.slots_per_segment(), bank.slots_per_segment(),

View File

@ -12,7 +12,7 @@ use crate::{
sigverify_stage::{DisabledSigVerifier, SigVerifyStage}, sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
}; };
use crossbeam_channel::unbounded; use crossbeam_channel::unbounded;
use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusSender}; use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender};
use std::{ use std::{
net::UdpSocket, net::UdpSocket,
sync::{ sync::{
@ -42,7 +42,7 @@ impl Tpu {
broadcast_sockets: Vec<UdpSocket>, broadcast_sockets: Vec<UdpSocket>,
sigverify_disabled: bool, sigverify_disabled: bool,
transaction_status_sender: Option<TransactionStatusSender>, transaction_status_sender: Option<TransactionStatusSender>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
broadcast_type: &BroadcastStageType, broadcast_type: &BroadcastStageType,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
shred_version: u16, shred_version: u16,
@ -87,7 +87,7 @@ impl Tpu {
cluster_info.clone(), cluster_info.clone(),
entry_receiver, entry_receiver,
&exit, &exit,
blocktree, blockstore,
shred_version, shred_version,
); );

View File

@ -1,6 +1,6 @@
use crossbeam_channel::{Receiver, RecvTimeoutError}; use crossbeam_channel::{Receiver, RecvTimeoutError};
use solana_client::rpc_request::RpcTransactionStatus; use solana_client::rpc_request::RpcTransactionStatus;
use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusBatch}; use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusBatch};
use solana_runtime::bank::{Bank, HashAgeKind}; use solana_runtime::bank::{Bank, HashAgeKind};
use std::{ use std::{
sync::{ sync::{
@ -19,7 +19,7 @@ impl TransactionStatusService {
#[allow(clippy::new_ret_no_self)] #[allow(clippy::new_ret_no_self)]
pub fn new( pub fn new(
write_transaction_status_receiver: Receiver<TransactionStatusBatch>, write_transaction_status_receiver: Receiver<TransactionStatusBatch>,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
) -> Self { ) -> Self {
let exit = exit.clone(); let exit = exit.clone();
@ -31,7 +31,7 @@ impl TransactionStatusService {
} }
if let Err(RecvTimeoutError::Disconnected) = Self::write_transaction_status_batch( if let Err(RecvTimeoutError::Disconnected) = Self::write_transaction_status_batch(
&write_transaction_status_receiver, &write_transaction_status_receiver,
&blocktree, &blockstore,
) { ) {
break; break;
} }
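The Disconnected arm above is the service's only exit path besides the exit flag; a plain timeout just means no batch arrived and the loop runs again. A minimal standalone sketch of that crossbeam recv_timeout pattern:

use crossbeam_channel::{unbounded, RecvTimeoutError};
use std::time::Duration;

fn main() {
    let (sender, receiver) = unbounded::<u64>();
    drop(sender); // every sender gone, as on validator shutdown
    match receiver.recv_timeout(Duration::from_millis(200)) {
        Ok(batch) => println!("process batch {}", batch),
        Err(RecvTimeoutError::Timeout) => println!("no work yet; loop again"),
        Err(RecvTimeoutError::Disconnected) => println!("channel closed; exit worker"),
    }
}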
@ -42,7 +42,7 @@ impl TransactionStatusService {
fn write_transaction_status_batch( fn write_transaction_status_batch(
write_transaction_status_receiver: &Receiver<TransactionStatusBatch>, write_transaction_status_receiver: &Receiver<TransactionStatusBatch>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
) -> Result<(), RecvTimeoutError> { ) -> Result<(), RecvTimeoutError> {
let TransactionStatusBatch { let TransactionStatusBatch {
bank, bank,
@ -68,7 +68,7 @@ impl TransactionStatusService {
.get_fee_calculator(&fee_hash) .get_fee_calculator(&fee_hash)
.expect("FeeCalculator must exist"); .expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message()); let fee = fee_calculator.calculate_fee(transaction.message());
blocktree blockstore
.write_transaction_status( .write_transaction_status(
(slot, transaction.signatures[0]), (slot, transaction.signatures[0]),
&RpcTransactionStatus { &RpcTransactionStatus {

View File

@ -21,8 +21,8 @@ use crossbeam_channel::unbounded;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::{ use solana_ledger::{
bank_forks::BankForks, bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver}, blockstore::{Blockstore, CompletedSlotsReceiver},
blocktree_processor::TransactionStatusSender, blockstore_processor::TransactionStatusSender,
}; };
use solana_sdk::{ use solana_sdk::{
pubkey::Pubkey, pubkey::Pubkey,
@ -63,7 +63,7 @@ impl Tvu {
/// # Arguments /// # Arguments
/// * `cluster_info` - The cluster_info state. /// * `cluster_info` - The cluster_info state.
/// * `sockets` - fetch, repair, and retransmit sockets /// * `sockets` - fetch, repair, and retransmit sockets
/// * `blocktree` - the ledger itself /// * `blockstore` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)] #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new( pub fn new(
vote_account: &Pubkey, vote_account: &Pubkey,
@ -72,7 +72,7 @@ impl Tvu {
bank_forks: &Arc<RwLock<BankForks>>, bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>, cluster_info: &Arc<RwLock<ClusterInfo>>,
sockets: Sockets, sockets: Sockets,
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
storage_state: &StorageState, storage_state: &StorageState,
blockstream_unix_socket: Option<&PathBuf>, blockstream_unix_socket: Option<&PathBuf>,
max_ledger_slots: Option<u64>, max_ledger_slots: Option<u64>,
@ -133,7 +133,7 @@ impl Tvu {
let retransmit_stage = RetransmitStage::new( let retransmit_stage = RetransmitStage::new(
bank_forks.clone(), bank_forks.clone(),
leader_schedule_cache, leader_schedule_cache,
blocktree.clone(), blockstore.clone(),
&cluster_info, &cluster_info,
Arc::new(retransmit_sockets), Arc::new(retransmit_sockets),
repair_socket, repair_socket,
@ -175,7 +175,7 @@ impl Tvu {
let (replay_stage, root_bank_receiver) = ReplayStage::new( let (replay_stage, root_bank_receiver) = ReplayStage::new(
replay_stage_config, replay_stage_config,
blocktree.clone(), blockstore.clone(),
bank_forks.clone(), bank_forks.clone(),
cluster_info.clone(), cluster_info.clone(),
ledger_signal_receiver, ledger_signal_receiver,
@ -185,7 +185,7 @@ impl Tvu {
let blockstream_service = if let Some(blockstream_unix_socket) = blockstream_unix_socket { let blockstream_service = if let Some(blockstream_unix_socket) = blockstream_unix_socket {
let blockstream_service = BlockstreamService::new( let blockstream_service = BlockstreamService::new(
blockstream_slot_receiver, blockstream_slot_receiver,
blocktree.clone(), blockstore.clone(),
blockstream_unix_socket, blockstream_unix_socket,
&exit, &exit,
); );
@ -197,7 +197,7 @@ impl Tvu {
let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| { let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| {
LedgerCleanupService::new( LedgerCleanupService::new(
ledger_cleanup_slot_receiver, ledger_cleanup_slot_receiver,
blocktree.clone(), blockstore.clone(),
max_ledger_slots, max_ledger_slots,
&exit, &exit,
) )
@ -206,7 +206,7 @@ impl Tvu {
let storage_stage = StorageStage::new( let storage_stage = StorageStage::new(
storage_state, storage_state,
root_bank_receiver, root_bank_receiver,
Some(blocktree), Some(blockstore),
&keypair, &keypair,
storage_keypair, storage_keypair,
&exit, &exit,
@ -272,14 +272,14 @@ pub mod tests {
cluster_info1.insert_info(leader.info.clone()); cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1)); let cref1 = Arc::new(RwLock::new(cluster_info1));
let (blocktree_path, _) = create_new_tmp_ledger!(&genesis_config); let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
let (blocktree, l_receiver, completed_slots_receiver) = let (blockstore, l_receiver, completed_slots_receiver) =
Blocktree::open_with_signal(&blocktree_path) Blockstore::open_with_signal(&blockstore_path)
.expect("Expected to successfully open ledger"); .expect("Expected to successfully open ledger");
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let bank = bank_forks.working_bank(); let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) = let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree, None); create_test_recorder(&bank, &blockstore, None);
let voting_keypair = Keypair::new(); let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new()); let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
@ -298,7 +298,7 @@ pub mod tests {
forwards: target1.sockets.tvu_forwards, forwards: target1.sockets.tvu_forwards,
} }
}, },
blocktree, blockstore,
&StorageState::default(), &StorageState::default(),
None, None,
None, None,

View File

@ -23,8 +23,8 @@ use crossbeam_channel::unbounded;
use solana_ledger::{ use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig}, bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils, bank_forks_utils,
blocktree::{Blocktree, CompletedSlotsReceiver}, blockstore::{Blockstore, CompletedSlotsReceiver},
blocktree_processor::{self, BankForksInfo}, blockstore_processor::{self, BankForksInfo},
create_new_tmp_ledger, create_new_tmp_ledger,
leader_schedule::FixedSchedule, leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
@ -156,12 +156,12 @@ impl Validator {
genesis_hash, genesis_hash,
bank_forks, bank_forks,
bank_forks_info, bank_forks_info,
blocktree, blockstore,
ledger_signal_receiver, ledger_signal_receiver,
completed_slots_receiver, completed_slots_receiver,
leader_schedule_cache, leader_schedule_cache,
poh_config, poh_config,
) = new_banks_from_blocktree( ) = new_banks_from_blockstore(
config.expected_genesis_hash, config.expected_genesis_hash,
ledger_path, ledger_path,
config.account_paths.clone(), config.account_paths.clone(),
@ -197,7 +197,7 @@ impl Validator {
bank.slots_per_segment(), bank.slots_per_segment(),
); );
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let rpc_service = if node.info.rpc.port() == 0 { let rpc_service = if node.info.rpc.port() == 0 {
None None
@ -207,7 +207,7 @@ impl Validator {
config.rpc_config.clone(), config.rpc_config.clone(),
bank_forks.clone(), bank_forks.clone(),
block_commitment_cache.clone(), block_commitment_cache.clone(),
blocktree.clone(), blockstore.clone(),
cluster_info.clone(), cluster_info.clone(),
genesis_hash, genesis_hash,
ledger_path, ledger_path,
@ -237,7 +237,7 @@ impl Validator {
Some(transaction_status_sender), Some(transaction_status_sender),
Some(TransactionStatusService::new( Some(TransactionStatusService::new(
transaction_status_receiver, transaction_status_receiver,
blocktree.clone(), blockstore.clone(),
&exit, &exit,
)), )),
) )
@ -265,11 +265,11 @@ impl Validator {
bank.tick_height(), bank.tick_height(),
bank.last_blockhash(), bank.last_blockhash(),
bank.slot(), bank.slot(),
leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)), leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blockstore)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&id, &id,
&blocktree, &blockstore,
blocktree.new_shreds_signals.first().cloned(), blockstore.new_shreds_signals.first().cloned(),
&leader_schedule_cache, &leader_schedule_cache,
&poh_config, &poh_config,
); );
@ -282,7 +282,7 @@ impl Validator {
let gossip_service = GossipService::new( let gossip_service = GossipService::new(
&cluster_info, &cluster_info,
Some(blocktree.clone()), Some(blockstore.clone()),
Some(bank_forks.clone()), Some(bank_forks.clone()),
node.sockets.gossip, node.sockets.gossip,
&exit, &exit,
@ -347,7 +347,7 @@ impl Validator {
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit); let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!( assert_eq!(
blocktree.new_shreds_signals.len(), blockstore.new_shreds_signals.len(),
1, 1,
"New shred signal for the TVU should be the same as the clear bank signal." "New shred signal for the TVU should be the same as the clear bank signal."
); );
@ -359,7 +359,7 @@ impl Validator {
&bank_forks, &bank_forks,
&cluster_info, &cluster_info,
sockets, sockets,
blocktree.clone(), blockstore.clone(),
&storage_state, &storage_state,
config.blockstream_unix_socket.as_ref(), config.blockstream_unix_socket.as_ref(),
config.max_ledger_slots, config.max_ledger_slots,
@ -389,7 +389,7 @@ impl Validator {
node.sockets.broadcast, node.sockets.broadcast,
config.dev_sigverify_disabled, config.dev_sigverify_disabled,
transaction_status_sender, transaction_status_sender,
&blocktree, &blockstore,
&config.broadcast_stage_type, &config.broadcast_stage_type,
&exit, &exit,
shred_version, shred_version,
@ -470,9 +470,9 @@ impl Validator {
} }
} }
pub fn new_banks_from_blocktree( pub fn new_banks_from_blockstore(
expected_genesis_hash: Option<Hash>, expected_genesis_hash: Option<Hash>,
blocktree_path: &Path, blockstore_path: &Path,
account_paths: Vec<PathBuf>, account_paths: Vec<PathBuf>,
snapshot_config: Option<SnapshotConfig>, snapshot_config: Option<SnapshotConfig>,
poh_verify: bool, poh_verify: bool,
@ -482,14 +482,14 @@ pub fn new_banks_from_blocktree(
Hash, Hash,
BankForks, BankForks,
Vec<BankForksInfo>, Vec<BankForksInfo>,
Blocktree, Blockstore,
Receiver<bool>, Receiver<bool>,
CompletedSlotsReceiver, CompletedSlotsReceiver,
LeaderScheduleCache, LeaderScheduleCache,
PohConfig, PohConfig,
) { ) {
let genesis_config = GenesisConfig::load(blocktree_path).unwrap_or_else(|err| { let genesis_config = GenesisConfig::load(blockstore_path).unwrap_or_else(|err| {
error!("Failed to load genesis from {:?}: {}", blocktree_path, err); error!("Failed to load genesis from {:?}: {}", blockstore_path, err);
process::exit(1); process::exit(1);
}); });
let genesis_hash = genesis_config.hash(); let genesis_hash = genesis_config.hash();
@ -500,24 +500,24 @@ pub fn new_banks_from_blocktree(
error!("genesis hash mismatch: expected {}", expected_genesis_hash); error!("genesis hash mismatch: expected {}", expected_genesis_hash);
error!( error!(
"Delete the ledger directory to continue: {:?}", "Delete the ledger directory to continue: {:?}",
blocktree_path blockstore_path
); );
process::exit(1); process::exit(1);
} }
} }
let (blocktree, ledger_signal_receiver, completed_slots_receiver) = let (blockstore, ledger_signal_receiver, completed_slots_receiver) =
Blocktree::open_with_signal(blocktree_path).expect("Failed to open ledger database"); Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database");
let process_options = blocktree_processor::ProcessOptions { let process_options = blockstore_processor::ProcessOptions {
poh_verify, poh_verify,
dev_halt_at_slot, dev_halt_at_slot,
..blocktree_processor::ProcessOptions::default() ..blockstore_processor::ProcessOptions::default()
}; };
let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load( let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
account_paths, account_paths,
snapshot_config.as_ref(), snapshot_config.as_ref(),
process_options, process_options,
@ -535,7 +535,7 @@ pub fn new_banks_from_blocktree(
genesis_hash, genesis_hash,
bank_forks, bank_forks,
bank_forks_info, bank_forks_info,
blocktree, blockstore,
ledger_signal_receiver, ledger_signal_receiver,
completed_slots_receiver, completed_slots_receiver,
leader_schedule_cache, leader_schedule_cache,

View File

@ -1,5 +1,5 @@
//! `window_service` handles shreds arriving on the data plane, storing them in //! `window_service` handles shreds arriving on the data plane, storing them in
//! blocktree and retransmitting where required //! blockstore and retransmitting where required
//! //!
use crate::cluster_info::ClusterInfo; use crate::cluster_info::ClusterInfo;
use crate::packet::Packets; use crate::packet::Packets;
@ -13,7 +13,7 @@ use rayon::iter::IntoParallelRefMutIterator;
use rayon::iter::ParallelIterator; use rayon::iter::ParallelIterator;
use rayon::ThreadPool; use rayon::ThreadPool;
use solana_ledger::bank_forks::BankForks; use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::{self, Blocktree, MAX_DATA_SHREDS_PER_SLOT}; use solana_ledger::blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache; use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred; use solana_ledger::shred::Shred;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error}; use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
@ -30,7 +30,7 @@ use std::time::{Duration, Instant};
fn verify_shred_slot(shred: &Shred, root: u64) -> bool { fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
if shred.is_data() { if shred.is_data() {
// Only data shreds have parent information // Only data shreds have parent information
blocktree::verify_shred_slots(shred.slot(), shred.parent(), root) blockstore::verify_shred_slots(shred.slot(), shred.parent(), root)
} else { } else {
// Filter out outdated coding shreds // Filter out outdated coding shreds
shred.slot() >= root shred.slot() >= root
@ -75,7 +75,7 @@ pub fn should_retransmit_and_persist(
fn run_insert( fn run_insert(
shred_receiver: &CrossbeamReceiver<Vec<Shred>>, shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> Result<()> { ) -> Result<()> {
let timer = Duration::from_millis(200); let timer = Duration::from_millis(200);
@ -85,15 +85,15 @@ fn run_insert(
shreds.append(&mut more_shreds) shreds.append(&mut more_shreds)
} }
let blocktree_insert_metrics = let blockstore_insert_metrics =
blocktree.insert_shreds(shreds, Some(leader_schedule_cache), false)?; blockstore.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
blocktree_insert_metrics.report_metrics("recv-window-insert-shreds"); blockstore_insert_metrics.report_metrics("recv-window-insert-shreds");
Ok(()) Ok(())
} }
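run_insert is the single writer into blockstore on this path; the loop above only batches shreds before handing them over. A minimal sketch of the same insert-and-report round trip outside the service thread, reusing test helpers that appear elsewhere in this diff (no leader schedule cache, hence the None):

use solana_ledger::{
    blockstore::{make_many_slot_entries, Blockstore},
    get_tmp_ledger_path,
};

fn main() {
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&ledger_path).expect("open blockstore");
        let (shreds, _entries) = make_many_slot_entries(0, 1, 10);
        let metrics = blockstore
            .insert_shreds(shreds, None, false)
            .expect("insert shreds");
        metrics.report_metrics("recv-window-insert-shreds");
    }
    Blockstore::destroy(&ledger_path).expect("destroy");
}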
fn recv_window<F>( fn recv_window<F>(
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
insert_shred_sender: &CrossbeamSender<Vec<Shred>>, insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>, verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
@ -117,7 +117,7 @@ where
let now = Instant::now(); let now = Instant::now();
inc_new_counter_debug!("streamer-recv_window-recv", total_packets); inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
let last_root = blocktree.last_root(); let last_root = blockstore.last_root();
let shreds: Vec<_> = thread_pool.install(|| { let shreds: Vec<_> = thread_pool.install(|| {
packets packets
.par_iter_mut() .par_iter_mut()
@ -138,7 +138,7 @@ where
// get retransmitted. It'll allow peer nodes to see this shred // get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead. // and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 { if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blocktree.set_dead_slot(shred.slot()); let _ = blockstore.set_dead_slot(shred.slot());
} }
packet.meta.slot = shred.slot(); packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed(); packet.meta.seed = shred.seed();
@ -205,7 +205,7 @@ pub struct WindowService {
impl WindowService { impl WindowService {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn new<F>( pub fn new<F>(
blocktree: Arc<Blocktree>, blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>, cluster_info: Arc<RwLock<ClusterInfo>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>, verified_receiver: CrossbeamReceiver<Vec<Packets>>,
retransmit: PacketSender, retransmit: PacketSender,
@ -227,7 +227,7 @@ impl WindowService {
}; };
let repair_service = RepairService::new( let repair_service = RepairService::new(
blocktree.clone(), blockstore.clone(),
exit.clone(), exit.clone(),
repair_socket, repair_socket,
cluster_info.clone(), cluster_info.clone(),
@ -238,7 +238,7 @@ impl WindowService {
let t_insert = Self::start_window_insert_thread( let t_insert = Self::start_window_insert_thread(
exit, exit,
&blocktree, &blockstore,
leader_schedule_cache, leader_schedule_cache,
insert_receiver, insert_receiver,
); );
@ -246,7 +246,7 @@ impl WindowService {
let t_window = Self::start_recv_window_thread( let t_window = Self::start_recv_window_thread(
cluster_info.read().unwrap().id(), cluster_info.read().unwrap().id(),
exit, exit,
&blocktree, &blockstore,
insert_sender, insert_sender,
verified_receiver, verified_receiver,
shred_filter, shred_filter,
@ -263,12 +263,12 @@ impl WindowService {
fn start_window_insert_thread( fn start_window_insert_thread(
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>, leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<Vec<Shred>>, insert_receiver: CrossbeamReceiver<Vec<Shred>>,
) -> JoinHandle<()> { ) -> JoinHandle<()> {
let exit = exit.clone(); let exit = exit.clone();
let blocktree = blocktree.clone(); let blockstore = blockstore.clone();
let leader_schedule_cache = leader_schedule_cache.clone(); let leader_schedule_cache = leader_schedule_cache.clone();
let mut handle_timeout = || {}; let mut handle_timeout = || {};
let handle_error = || { let handle_error = || {
@ -281,7 +281,7 @@ impl WindowService {
break; break;
} }
if let Err(e) = run_insert(&insert_receiver, &blocktree, &leader_schedule_cache) { if let Err(e) = run_insert(&insert_receiver, &blockstore, &leader_schedule_cache) {
if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) { if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) {
break; break;
} }
@ -293,7 +293,7 @@ impl WindowService {
fn start_recv_window_thread<F>( fn start_recv_window_thread<F>(
id: Pubkey, id: Pubkey,
exit: &Arc<AtomicBool>, exit: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>, blockstore: &Arc<Blockstore>,
insert_sender: CrossbeamSender<Vec<Shred>>, insert_sender: CrossbeamSender<Vec<Shred>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>, verified_receiver: CrossbeamReceiver<Vec<Packets>>,
shred_filter: F, shred_filter: F,
@ -307,7 +307,7 @@ impl WindowService {
+ std::marker::Sync, + std::marker::Sync,
{ {
let exit = exit.clone(); let exit = exit.clone();
let blocktree = blocktree.clone(); let blockstore = blockstore.clone();
Builder::new() Builder::new()
.name("solana-window".to_string()) .name("solana-window".to_string())
.spawn(move || { .spawn(move || {
@ -334,7 +334,7 @@ impl WindowService {
} }
}; };
if let Err(e) = recv_window( if let Err(e) = recv_window(
&blocktree, &blockstore,
&insert_sender, &insert_sender,
&id, &id,
&verified_receiver, &verified_receiver,
@ -401,7 +401,7 @@ mod test {
use rand::thread_rng; use rand::thread_rng;
use solana_ledger::shred::DataShredHeader; use solana_ledger::shred::DataShredHeader;
use solana_ledger::{ use solana_ledger::{
blocktree::{make_many_slot_entries, Blocktree}, blockstore::{make_many_slot_entries, Blockstore},
entry::{create_ticks, Entry}, entry::{create_ticks, Entry},
get_tmp_ledger_path, get_tmp_ledger_path,
shred::Shredder, shred::Shredder,
@ -434,23 +434,23 @@ mod test {
#[test] #[test]
fn test_process_shred() { fn test_process_shred() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let num_entries = 10; let num_entries = 10;
let original_entries = create_ticks(num_entries, 0, Hash::default()); let original_entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new())); let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new()));
shreds.reverse(); shreds.reverse();
blocktree blockstore
.insert_shreds(shreds, None, false) .insert_shreds(shreds, None, false)
.expect("Expect successful processing of shred"); .expect("Expect successful processing of shred");
assert_eq!( assert_eq!(
blocktree.get_slot_entries(0, 0, None).unwrap(), blockstore.get_slot_entries(0, 0, None).unwrap(),
original_entries original_entries
); );
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
@ -529,18 +529,18 @@ mod test {
verified_receiver: CrossbeamReceiver<Vec<Packets>>, verified_receiver: CrossbeamReceiver<Vec<Packets>>,
exit: Arc<AtomicBool>, exit: Arc<AtomicBool>,
) -> WindowService { ) -> WindowService {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let (blocktree, _, _) = Blocktree::open_with_signal(&blocktree_path) let (blockstore, _, _) = Blockstore::open_with_signal(&blockstore_path)
.expect("Expected to be able to open database ledger"); .expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree); let blockstore = Arc::new(blockstore);
let (retransmit_sender, _retransmit_receiver) = channel(); let (retransmit_sender, _retransmit_receiver) = channel();
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair( let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::new_localhost(&Pubkey::default(), 0), ContactInfo::new_localhost(&Pubkey::default(), 0),
))); )));
let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap()); let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
let window = WindowService::new( let window = WindowService::new(
blocktree, blockstore,
cluster_info, cluster_info,
verified_receiver, verified_receiver,
retransmit_sender, retransmit_sender,

View File

@ -3,7 +3,7 @@
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use solana_core::ledger_cleanup_service::LedgerCleanupService; use solana_core::ledger_cleanup_service::LedgerCleanupService;
use solana_ledger::blocktree::{make_many_slot_entries, Blocktree}; use solana_ledger::blockstore::{make_many_slot_entries, Blockstore};
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use solana_ledger::shred::Shred; use solana_ledger::shred::Shred;
use std::collections::VecDeque; use std::collections::VecDeque;
@ -33,7 +33,7 @@ mod tests {
pub stop_size_bytes: u64, pub stop_size_bytes: u64,
pub stop_size_iterations: u64, pub stop_size_iterations: u64,
pub pre_generate_data: bool, pub pre_generate_data: bool,
pub cleanup_blocktree: bool, pub cleanup_blockstore: bool,
pub emit_cpu_info: bool, pub emit_cpu_info: bool,
pub assert_compaction: bool, pub assert_compaction: bool,
} }
@ -150,7 +150,7 @@ mod tests {
let stop_size_bytes = read_env("STOP_SIZE_BYTES", DEFAULT_STOP_SIZE_BYTES); let stop_size_bytes = read_env("STOP_SIZE_BYTES", DEFAULT_STOP_SIZE_BYTES);
let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS); let stop_size_iterations = read_env("STOP_SIZE_ITERATIONS", DEFAULT_STOP_SIZE_ITERATIONS);
let pre_generate_data = read_env("PRE_GENERATE_DATA", false); let pre_generate_data = read_env("PRE_GENERATE_DATA", false);
let cleanup_blocktree = read_env("CLEANUP_BLOCKTREE", true); let cleanup_blockstore = read_env("CLEANUP_BLOCKSTORE", true);
let emit_cpu_info = read_env("EMIT_CPU_INFO", true); let emit_cpu_info = read_env("EMIT_CPU_INFO", true);
// set default to `true` once compaction is merged // set default to `true` once compaction is merged
let assert_compaction = read_env("ASSERT_COMPACTION", false); let assert_compaction = read_env("ASSERT_COMPACTION", false);
@ -163,7 +163,7 @@ mod tests {
stop_size_bytes, stop_size_bytes,
stop_size_iterations, stop_size_iterations,
pre_generate_data, pre_generate_data,
cleanup_blocktree, cleanup_blockstore,
emit_cpu_info, emit_cpu_info,
assert_compaction, assert_compaction,
} }
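read_env is defined outside this hunk; a plausible reconstruction, assuming it parses the environment variable when present and falls back to the default otherwise (hypothetical, not the committed definition):

use std::{env, str::FromStr};

fn read_env<T: FromStr>(key: &str, default: T) -> T {
    env::var(key)
        .ok()
        .and_then(|value| value.parse::<T>().ok())
        .unwrap_or(default)
}

fn main() {
    // e.g. running with CLEANUP_BLOCKSTORE=false flips this to false
    let cleanup_blockstore: bool = read_env("CLEANUP_BLOCKSTORE", true);
    println!("cleanup_blockstore = {}", cleanup_blockstore);
}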
@ -181,11 +181,11 @@ mod tests {
batch_size: u64, batch_size: u64,
entries: u64, entries: u64,
max_slots: i64, max_slots: i64,
blocktree: &Blocktree, blockstore: &Blockstore,
cpu: &CpuStatsInner, cpu: &CpuStatsInner,
) { ) {
let time_now = Instant::now(); let time_now = Instant::now();
let storage_now = blocktree.storage_size().unwrap_or(0); let storage_now = blockstore.storage_size().unwrap_or(0);
let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle); let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle);
println!( println!(
@ -209,11 +209,11 @@ mod tests {
#[test] #[test]
fn test_ledger_cleanup_compaction() { fn test_ledger_cleanup_compaction() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let config = get_benchmark_config(); let config = get_benchmark_config();
eprintln!("BENCHMARK CONFIG: {:?}", config); eprintln!("BENCHMARK CONFIG: {:?}", config);
eprintln!("LEDGER_PATH: {:?}", &blocktree_path); eprintln!("LEDGER_PATH: {:?}", &blockstore_path);
let benchmark_slots = config.benchmark_slots; let benchmark_slots = config.benchmark_slots;
let batch_size = config.batch_size; let batch_size = config.batch_size;
@ -227,7 +227,7 @@ mod tests {
let (sender, receiver) = channel(); let (sender, receiver) = channel();
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
let cleaner = let cleaner =
LedgerCleanupService::new(receiver, blocktree.clone(), max_ledger_slots, &exit); LedgerCleanupService::new(receiver, blockstore.clone(), max_ledger_slots, &exit);
let exit_cpu = Arc::new(AtomicBool::new(false)); let exit_cpu = Arc::new(AtomicBool::new(false));
let sys = CpuStatsUpdater::new(&exit_cpu); let sys = CpuStatsUpdater::new(&exit_cpu);
@ -259,7 +259,7 @@ mod tests {
0, 0,
0, 0,
0, 0,
&blocktree, &blockstore,
&sys.get_stats(), &sys.get_stats(),
); );
@ -272,7 +272,7 @@ mod tests {
make_many_slot_entries(x, batch_size, entries_per_slot).0 make_many_slot_entries(x, batch_size, entries_per_slot).0
}; };
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
sender.send(x).unwrap(); sender.send(x).unwrap();
emit_stats( emit_stats(
@ -283,7 +283,7 @@ mod tests {
batch_size, batch_size,
batch_size, batch_size,
max_ledger_slots as i64, max_ledger_slots as i64,
&blocktree, &blockstore,
&sys.get_stats(), &sys.get_stats(),
); );
@ -313,13 +313,13 @@ mod tests {
0, 0,
0, 0,
max_ledger_slots as i64, max_ledger_slots as i64,
&blocktree, &blockstore,
&sys.get_stats(), &sys.get_stats(),
); );
// Poll on some compaction happening // Poll on some compaction happening
let start_poll = Instant::now(); let start_poll = Instant::now();
while blocktree.storage_size().unwrap_or(0) >= u1 { while blockstore.storage_size().unwrap_or(0) >= u1 {
if start_poll.elapsed().as_secs() > ROCKSDB_FLUSH_GRACE_PERIOD_SECS { if start_poll.elapsed().as_secs() > ROCKSDB_FLUSH_GRACE_PERIOD_SECS {
break; break;
} }
@ -334,7 +334,7 @@ mod tests {
0, 0,
0, 0,
max_ledger_slots as i64, max_ledger_slots as i64,
&blocktree, &blockstore,
&sys.get_stats(), &sys.get_stats(),
); );
@ -350,9 +350,10 @@ mod tests {
assert!(u2 < u1, "expected compaction! pre={},post={}", u1, u2); assert!(u2 < u1, "expected compaction! pre={},post={}", u1, u2);
} }
if config.cleanup_blocktree { if config.cleanup_blockstore {
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path)
.expect("Expected successful database destruction");
} }
} }
} }

View File

@ -7,9 +7,9 @@ mod tests {
use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST}; use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST};
use solana_core::storage_stage::{StorageStage, StorageState}; use solana_core::storage_stage::{StorageStage, StorageState};
use solana_ledger::bank_forks::BankForks; use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree_processor; use solana_ledger::blockstore_processor;
use solana_ledger::entry; use solana_ledger::entry;
use solana_ledger::{blocktree::Blocktree, create_new_tmp_ledger}; use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger};
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT; use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
@ -44,7 +44,7 @@ mod tests {
.push(solana_storage_program::solana_storage_program!()); .push(solana_storage_program::solana_storage_program!());
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bank = Bank::new(&genesis_config); let bank = Bank::new(&genesis_config);
let bank = Arc::new(bank); let bank = Arc::new(bank);
@ -63,7 +63,7 @@ mod tests {
let storage_stage = StorageStage::new( let storage_stage = StorageStage::new(
&storage_state, &storage_state,
bank_receiver, bank_receiver,
Some(blocktree.clone()), Some(blockstore.clone()),
&keypair, &keypair,
&storage_keypair, &storage_keypair,
&exit.clone(), &exit.clone(),
@ -109,7 +109,7 @@ mod tests {
let next_bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 2)); let next_bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 2));
// register ticks so the program reports a different segment // register ticks so the program reports a different segment
blocktree_processor::process_entries( blockstore_processor::process_entries(
&next_bank, &next_bank,
&entry::create_ticks( &entry::create_ticks(
DEFAULT_TICKS_PER_SLOT * next_bank.slots_per_segment() + 1, DEFAULT_TICKS_PER_SLOT * next_bank.slots_per_segment() + 1,
@ -164,7 +164,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let slot = 1; let slot = 1;
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks( let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
@ -182,7 +182,7 @@ mod tests {
let storage_stage = StorageStage::new( let storage_stage = StorageStage::new(
&storage_state, &storage_state,
bank_receiver, bank_receiver,
Some(blocktree.clone()), Some(blockstore.clone()),
&keypair, &keypair,
&storage_keypair, &storage_keypair,
&exit.clone(), &exit.clone(),
@ -203,7 +203,7 @@ mod tests {
let rooted_banks = (slot..slot + last_bank.slots_per_segment() + 1) let rooted_banks = (slot..slot + last_bank.slots_per_segment() + 1)
.map(|i| { .map(|i| {
let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i)); let bank = Arc::new(Bank::new_from_parent(&last_bank, &keypair.pubkey(), i));
blocktree_processor::process_entries( blockstore_processor::process_entries(
&bank, &bank,
&entry::create_ticks(64, 0, bank.last_blockhash()), &entry::create_ticks(64, 0, bank.last_blockhash()),
true, true,

View File

@ -7,7 +7,7 @@ use solana_clap_utils::{
input_validators::{is_rfc3339_datetime, is_valid_percentage}, input_validators::{is_rfc3339_datetime, is_valid_percentage},
}; };
use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account}; use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account};
use solana_ledger::{blocktree::create_new_ledger, poh::compute_hashes_per_tick}; use solana_ledger::{blockstore::create_new_ledger, poh::compute_hashes_per_tick};
use solana_sdk::{ use solana_sdk::{
account::Account, account::Account,
clock, clock,

View File

@ -3,14 +3,14 @@ use clap::{
}; };
use histogram; use histogram;
use serde_json::json; use serde_json::json;
use solana_ledger::blocktree_db::Database; use solana_ledger::blockstore_db::Database;
use solana_ledger::{ use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig}, bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils, bank_forks_utils,
blocktree::Blocktree, blockstore::Blockstore,
blocktree_db, blockstore_db,
blocktree_db::Column, blockstore_db::Column,
blocktree_processor, blockstore_processor,
rooted_slot_iterator::RootedSlotIterator, rooted_slot_iterator::RootedSlotIterator,
}; };
use solana_sdk::{ use solana_sdk::{
@ -34,9 +34,9 @@ enum LedgerOutputMethod {
Json, Json,
} }
fn output_slot(blocktree: &Blocktree, slot: Slot, method: &LedgerOutputMethod) { fn output_slot(blockstore: &Blockstore, slot: Slot, method: &LedgerOutputMethod) {
println!("Slot Meta {:?}", blocktree.meta(slot)); println!("Slot Meta {:?}", blockstore.meta(slot));
let entries = blocktree let entries = blockstore
.get_slot_entries(slot, 0, None) .get_slot_entries(slot, 0, None)
.unwrap_or_else(|err| { .unwrap_or_else(|err| {
eprintln!("Failed to load entries for slot {}: {:?}", slot, err); eprintln!("Failed to load entries for slot {}: {:?}", slot, err);
@ -116,9 +116,9 @@ fn output_slot(blocktree: &Blocktree, slot: Slot, method: &LedgerOutputMethod) {
} }
} }
fn output_ledger(blocktree: Blocktree, starting_slot: Slot, method: LedgerOutputMethod) { fn output_ledger(blockstore: Blockstore, starting_slot: Slot, method: LedgerOutputMethod) {
let rooted_slot_iterator = let rooted_slot_iterator =
RootedSlotIterator::new(starting_slot, &blocktree).unwrap_or_else(|err| { RootedSlotIterator::new(starting_slot, &blockstore).unwrap_or_else(|err| {
eprintln!( eprintln!(
"Failed to load entries starting from slot {}: {:?}", "Failed to load entries starting from slot {}: {:?}",
starting_slot, err starting_slot, err
@ -139,7 +139,7 @@ fn output_ledger(blocktree: Blocktree, starting_slot: Slot, method: LedgerOutput
} }
} }
output_slot(&blocktree, slot, &method); output_slot(&blockstore, slot, &method);
} }
if method == LedgerOutputMethod::Json { if method == LedgerOutputMethod::Json {
@ -174,7 +174,7 @@ fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result
#[allow(clippy::cognitive_complexity)] #[allow(clippy::cognitive_complexity)]
fn graph_forks( fn graph_forks(
bank_forks: BankForks, bank_forks: BankForks,
bank_forks_info: Vec<blocktree_processor::BankForksInfo>, bank_forks_info: Vec<blockstore_processor::BankForksInfo>,
include_all_votes: bool, include_all_votes: bool,
) -> String { ) -> String {
// Search all forks and collect the last vote made by each validator // Search all forks and collect the last vote made by each validator
@ -394,7 +394,7 @@ fn graph_forks(
dot.join("\n") dot.join("\n")
} }
fn analyze_column<T: solana_ledger::blocktree_db::Column>( fn analyze_column<T: solana_ledger::blockstore_db::Column>(
db: &Database, db: &Database,
name: &str, name: &str,
key_size: usize, key_size: usize,
@ -404,7 +404,7 @@ fn analyze_column<T: solana_ledger::blocktree_db::Column>(
let mut val_tot: u64 = 0; let mut val_tot: u64 = 0;
let mut row_hist = histogram::Histogram::new(); let mut row_hist = histogram::Histogram::new();
let a = key_size as u64; let a = key_size as u64;
for (_x, y) in db.iter::<T>(blocktree_db::IteratorMode::Start).unwrap() { for (_x, y) in db.iter::<T>(blockstore_db::IteratorMode::Start).unwrap() {
let b = y.len() as u64; let b = y.len() as u64;
key_tot += a; key_tot += a;
val_hist.increment(b).unwrap(); val_hist.increment(b).unwrap();
@ -464,7 +464,7 @@ fn analyze_column<T: solana_ledger::blocktree_db::Column>(
} }
fn analyze_storage(database: &Database) -> Result<(), String> { fn analyze_storage(database: &Database) -> Result<(), String> {
use blocktree_db::columns::*; use blockstore_db::columns::*;
analyze_column::<SlotMeta>(database, "SlotMeta", SlotMeta::key_size())?; analyze_column::<SlotMeta>(database, "SlotMeta", SlotMeta::key_size())?;
analyze_column::<Orphans>(database, "Orphans", Orphans::key_size())?; analyze_column::<Orphans>(database, "Orphans", Orphans::key_size())?;
analyze_column::<DeadSlots>(database, "DeadSlots", DeadSlots::key_size())?; analyze_column::<DeadSlots>(database, "DeadSlots", DeadSlots::key_size())?;
@ -492,9 +492,9 @@ fn open_genesis_config(ledger_path: &Path) -> GenesisConfig {
}) })
} }
fn open_blocktree(ledger_path: &Path) -> Blocktree { fn open_blockstore(ledger_path: &Path) -> Blockstore {
match Blocktree::open(ledger_path) { match Blockstore::open(ledger_path) {
Ok(blocktree) => blocktree, Ok(blockstore) => blockstore,
Err(err) => { Err(err) => {
eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err); eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);
exit(1); exit(1);
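For orientation, a minimal sketch of the open-and-inspect pattern the subcommands below are built on, condensed from open_blockstore() and output_slot() in this file (error handling simplified; only APIs shown in this diff are assumed):

use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot;
use std::path::Path;

// Sketch: open a ledger and dump one slot, mirroring open_blockstore()
// and output_slot() above.
fn inspect_slot(ledger_path: &Path, slot: Slot) {
    let blockstore = Blockstore::open(ledger_path).expect("failed to open ledger");
    println!("Slot Meta {:?}", blockstore.meta(slot));
    let entries = blockstore.get_slot_entries(slot, 0, None).unwrap_or_else(|err| {
        eprintln!("Failed to load entries for slot {}: {:?}", slot, err);
        Vec::new()
    });
    println!("{} entries", entries.len());
}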
@ -669,7 +669,7 @@ fn main() {
("print", Some(args_matches)) => { ("print", Some(args_matches)) => {
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot); let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
output_ledger( output_ledger(
open_blocktree(&ledger_path), open_blockstore(&ledger_path),
starting_slot, starting_slot,
LedgerOutputMethod::Print, LedgerOutputMethod::Print,
); );
@ -682,7 +682,7 @@ fn main() {
for slot in slots { for slot in slots {
println!("Slot {}", slot); println!("Slot {}", slot);
output_slot( output_slot(
&open_blocktree(&ledger_path), &open_blockstore(&ledger_path),
slot, slot,
&LedgerOutputMethod::Print, &LedgerOutputMethod::Print,
); );
@ -691,7 +691,7 @@ fn main() {
("json", Some(args_matches)) => { ("json", Some(args_matches)) => {
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot); let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
output_ledger( output_ledger(
open_blocktree(&ledger_path), open_blockstore(&ledger_path),
starting_slot, starting_slot,
LedgerOutputMethod::Json, LedgerOutputMethod::Json,
); );
@ -717,15 +717,15 @@ fn main() {
vec![ledger_path.join("accounts")] vec![ledger_path.join("accounts")]
}; };
let process_options = blocktree_processor::ProcessOptions { let process_options = blockstore_processor::ProcessOptions {
poh_verify, poh_verify,
dev_halt_at_slot, dev_halt_at_slot,
..blocktree_processor::ProcessOptions::default() ..blockstore_processor::ProcessOptions::default()
}; };
match bank_forks_utils::load( match bank_forks_utils::load(
&open_genesis_config(&ledger_path), &open_genesis_config(&ledger_path),
&open_blocktree(&ledger_path), &open_blockstore(&ledger_path),
account_paths, account_paths,
snapshot_config.as_ref(), snapshot_config.as_ref(),
process_options, process_options,
@ -764,17 +764,17 @@ fn main() {
} }
("prune", Some(args_matches)) => { ("prune", Some(args_matches)) => {
if let Some(prune_file_path) = args_matches.value_of("slot_list") { if let Some(prune_file_path) = args_matches.value_of("slot_list") {
let blocktree = open_blocktree(&ledger_path); let blockstore = open_blockstore(&ledger_path);
let prune_file = File::open(prune_file_path.to_string()).unwrap(); let prune_file = File::open(prune_file_path.to_string()).unwrap();
let slot_hashes: BTreeMap<u64, String> = let slot_hashes: BTreeMap<u64, String> =
serde_yaml::from_reader(prune_file).unwrap(); serde_yaml::from_reader(prune_file).unwrap();
let iter = let iter =
RootedSlotIterator::new(0, &blocktree).expect("Failed to get rooted slot"); RootedSlotIterator::new(0, &blockstore).expect("Failed to get rooted slot");
let potential_hashes: Vec<_> = iter let potential_hashes: Vec<_> = iter
.filter_map(|(slot, _meta)| { .filter_map(|(slot, _meta)| {
let blockhash = blocktree let blockhash = blockstore
.get_slot_entries(slot, 0, None) .get_slot_entries(slot, 0, None)
.unwrap() .unwrap()
.last() .last()
@ -796,11 +796,11 @@ fn main() {
.last() .last()
.expect("Failed to find a valid slot"); .expect("Failed to find a valid slot");
println!("Prune at slot {:?} hash {:?}", target_slot, target_hash); println!("Prune at slot {:?} hash {:?}", target_slot, target_hash);
blocktree.prune(*target_slot); blockstore.prune(*target_slot);
} }
} }
("list-roots", Some(args_matches)) => { ("list-roots", Some(args_matches)) => {
let blocktree = open_blocktree(&ledger_path); let blockstore = open_blockstore(&ledger_path);
let max_height = if let Some(height) = args_matches.value_of("max_height") { let max_height = if let Some(height) = args_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number") usize::from_str(height).expect("Maximum height must be a number")
} else { } else {
@ -812,12 +812,12 @@ fn main() {
usize::from_str(DEFAULT_ROOT_COUNT).unwrap() usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
}; };
let iter = RootedSlotIterator::new(0, &blocktree).expect("Failed to get rooted slot"); let iter = RootedSlotIterator::new(0, &blockstore).expect("Failed to get rooted slot");
let slot_hash: Vec<_> = iter let slot_hash: Vec<_> = iter
.filter_map(|(slot, _meta)| { .filter_map(|(slot, _meta)| {
if slot <= max_height as u64 { if slot <= max_height as u64 {
let blockhash = blocktree let blockhash = blockstore
.get_slot_entries(slot, 0, None) .get_slot_entries(slot, 0, None)
.unwrap() .unwrap()
.last() .last()
@ -853,7 +853,7 @@ fn main() {
}); });
} }
("bounds", Some(args_matches)) => { ("bounds", Some(args_matches)) => {
match open_blocktree(&ledger_path).slot_meta_iterator(0) { match open_blockstore(&ledger_path).slot_meta_iterator(0) {
Ok(metas) => { Ok(metas) => {
let all = args_matches.is_present("all"); let all = args_matches.is_present("all");

View File

@ -1,7 +1,7 @@
use crate::{ use crate::{
bank_forks::{BankForks, SnapshotConfig}, bank_forks::{BankForks, SnapshotConfig},
blocktree::Blocktree, blockstore::Blockstore,
blocktree_processor::{self, BankForksInfo, BlocktreeProcessorError, ProcessOptions}, blockstore_processor::{self, BankForksInfo, BlockstoreProcessorError, ProcessOptions},
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
snapshot_utils, snapshot_utils,
}; };
@ -11,11 +11,11 @@ use std::{fs, path::PathBuf, sync::Arc};
pub fn load( pub fn load(
genesis_config: &GenesisConfig, genesis_config: &GenesisConfig,
blocktree: &Blocktree, blockstore: &Blockstore,
account_paths: Vec<PathBuf>, account_paths: Vec<PathBuf>,
snapshot_config: Option<&SnapshotConfig>, snapshot_config: Option<&SnapshotConfig>,
process_options: ProcessOptions, process_options: ProcessOptions,
) -> Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> { ) -> Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError> {
if let Some(snapshot_config) = snapshot_config.as_ref() { if let Some(snapshot_config) = snapshot_config.as_ref() {
info!( info!(
"Initializing snapshot path: {:?}", "Initializing snapshot path: {:?}",
@ -42,9 +42,9 @@ pub fn load(
) )
.expect("Load from snapshot failed"); .expect("Load from snapshot failed");
return blocktree_processor::process_blocktree_from_root( return blockstore_processor::process_blockstore_from_root(
genesis_config, genesis_config,
blocktree, blockstore,
Arc::new(deserialized_bank), Arc::new(deserialized_bank),
&process_options, &process_options,
); );
@ -56,9 +56,9 @@ pub fn load(
} }
info!("Processing ledger from genesis"); info!("Processing ledger from genesis");
blocktree_processor::process_blocktree( blockstore_processor::process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
account_paths, account_paths,
process_options, process_options,
) )

File diff suppressed because it is too large

View File

@ -1,4 +1,4 @@
use crate::blocktree_meta; use crate::blockstore_meta;
use bincode::{deserialize, serialize}; use bincode::{deserialize, serialize};
use byteorder::{BigEndian, ByteOrder}; use byteorder::{BigEndian, ByteOrder};
use fs_extra; use fs_extra;
@ -36,7 +36,7 @@ const CODE_SHRED_CF: &str = "code_shred";
const TRANSACTION_STATUS_CF: &str = "transaction_status"; const TRANSACTION_STATUS_CF: &str = "transaction_status";
#[derive(Error, Debug)] #[derive(Error, Debug)]
pub enum BlocktreeError { pub enum BlockstoreError {
ShredForIndexExists, ShredForIndexExists,
InvalidShredData(Box<bincode::ErrorKind>), InvalidShredData(Box<bincode::ErrorKind>),
RocksDb(#[from] rocksdb::Error), RocksDb(#[from] rocksdb::Error),
@ -46,11 +46,11 @@ pub enum BlocktreeError {
Serialize(#[from] Box<bincode::ErrorKind>), Serialize(#[from] Box<bincode::ErrorKind>),
FsExtraError(#[from] fs_extra::error::Error), FsExtraError(#[from] fs_extra::error::Error),
} }
pub(crate) type Result<T> = std::result::Result<T, BlocktreeError>; pub(crate) type Result<T> = std::result::Result<T, BlockstoreError>;
impl std::fmt::Display for BlocktreeError { impl std::fmt::Display for BlockstoreError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "blocktree error") write!(f, "blockstore error")
} }
} }
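Since the crate-local Result<T> alias wraps BlockstoreError, the rename is purely nominal for callers; a minimal sketch of error propagation through the alias (the helper itself is hypothetical):

// Hypothetical helper: any BlockstoreError raised by get_slot_entries()
// bubbles up through the crate-local Result alias via `?`.
fn entry_count(blockstore: &Blockstore, slot: u64) -> Result<usize> {
    Ok(blockstore.get_slot_entries(slot, 0, None)?.len())
}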
@ -344,7 +344,7 @@ impl Column for columns::Index {
} }
impl TypedColumn for columns::Index { impl TypedColumn for columns::Index {
type Type = blocktree_meta::Index; type Type = blockstore_meta::Index;
} }
impl Column for columns::DeadSlots { impl Column for columns::DeadSlots {
@ -452,7 +452,7 @@ impl Column for columns::SlotMeta {
} }
impl TypedColumn for columns::SlotMeta { impl TypedColumn for columns::SlotMeta {
type Type = blocktree_meta::SlotMeta; type Type = blockstore_meta::SlotMeta;
} }
impl Column for columns::ErasureMeta { impl Column for columns::ErasureMeta {
@ -483,7 +483,7 @@ impl Column for columns::ErasureMeta {
} }
impl TypedColumn for columns::ErasureMeta { impl TypedColumn for columns::ErasureMeta {
type Type = blocktree_meta::ErasureMeta; type Type = blockstore_meta::ErasureMeta;
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
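For reference, this Column/TypedColumn machinery is what ledger-tool's analyze_column walks; a condensed sketch of iterating one column family, using only the calls shown earlier in this diff:

use solana_ledger::{blockstore_db, blockstore_db::Database};

// Sketch: sum the serialized value sizes in one column family, as
// analyze_column does in ledger-tool.
fn total_value_bytes<T: blockstore_db::Column>(db: &Database) -> u64 {
    db.iter::<T>(blockstore_db::IteratorMode::Start)
        .unwrap()
        .map(|(_key, value)| value.len() as u64)
        .sum()
}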

View File

@ -131,7 +131,7 @@ impl SlotMeta {
// Should never happen // Should never happen
if self.consumed > self.last_index + 1 { if self.consumed > self.last_index + 1 {
datapoint!( datapoint!(
"blocktree_error", "blockstore_error",
( (
"error", "error",
format!( format!(

View File

@ -1,8 +1,8 @@
use crate::{ use crate::{
bank_forks::BankForks, bank_forks::BankForks,
block_error::BlockError, block_error::BlockError,
blocktree::Blocktree, blockstore::Blockstore,
blocktree_meta::SlotMeta, blockstore_meta::SlotMeta,
entry::{create_ticks, Entry, EntrySlice}, entry::{create_ticks, Entry, EntrySlice},
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
}; };
@ -235,7 +235,7 @@ pub struct BankForksInfo {
} }
#[derive(Error, Debug, PartialEq)] #[derive(Error, Debug, PartialEq)]
pub enum BlocktreeProcessorError { pub enum BlockstoreProcessorError {
#[error("failed to load entries")] #[error("failed to load entries")]
FailedToLoadEntries, FailedToLoadEntries,
@ -252,7 +252,7 @@ pub enum BlocktreeProcessorError {
NoValidForksFound, NoValidForksFound,
} }
/// Callback for accessing bank state while processing the blocktree /// Callback for accessing bank state while processing the blockstore
pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>; pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>;
#[derive(Default, Clone)] #[derive(Default, Clone)]
@ -264,12 +264,13 @@ pub struct ProcessOptions {
pub override_num_threads: Option<usize>, pub override_num_threads: Option<usize>,
} }
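A sketch of wiring a ProcessCallback through ProcessOptions, condensed from the entry_callback test near the end of this file (the counter is purely illustrative):

use std::sync::{Arc, RwLock};

// Illustrative: count the banks the processor hands to the callback.
let callback_counter = Arc::new(RwLock::new(0usize));
let counter = callback_counter.clone();
let entry_callback: ProcessCallback = Arc::new(move |_bank| {
    *counter.write().unwrap() += 1;
});
let opts = ProcessOptions {
    entry_callback: Some(entry_callback),
    ..ProcessOptions::default()
};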
pub fn process_blocktree( pub fn process_blockstore(
genesis_config: &GenesisConfig, genesis_config: &GenesisConfig,
blocktree: &Blocktree, blockstore: &Blockstore,
account_paths: Vec<PathBuf>, account_paths: Vec<PathBuf>,
opts: ProcessOptions, opts: ProcessOptions,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> { ) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError>
{
if let Some(num_threads) = opts.override_num_threads { if let Some(num_threads) = opts.override_num_threads {
PAR_THREAD_POOL.with(|pool| { PAR_THREAD_POOL.with(|pool| {
*pool.borrow_mut() = rayon::ThreadPoolBuilder::new() *pool.borrow_mut() = rayon::ThreadPoolBuilder::new()
@ -282,17 +283,18 @@ pub fn process_blocktree(
// Setup bank for slot 0 // Setup bank for slot 0
let bank0 = Arc::new(Bank::new_with_paths(&genesis_config, account_paths)); let bank0 = Arc::new(Bank::new_with_paths(&genesis_config, account_paths));
info!("processing ledger for slot 0..."); info!("processing ledger for slot 0...");
process_bank_0(&bank0, blocktree, &opts)?; process_bank_0(&bank0, blockstore, &opts)?;
process_blocktree_from_root(genesis_config, blocktree, bank0, &opts) process_blockstore_from_root(genesis_config, blockstore, bank0, &opts)
} }
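End to end, the renamed entry point is driven exactly as in the tests later in this file; a minimal sketch (ledger creation elided):

// Sketch: replay a ledger from genesis with PoH verification, as the
// tests below do.
let blockstore = Blockstore::open(&ledger_path).expect("failed to open ledger");
let opts = ProcessOptions {
    poh_verify: true,
    ..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, leader_schedule_cache) =
    process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();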
// Process blocktree from a known root bank // Process blockstore from a known root bank
pub fn process_blocktree_from_root( pub fn process_blockstore_from_root(
genesis_config: &GenesisConfig, genesis_config: &GenesisConfig,
blocktree: &Blocktree, blockstore: &Blockstore,
bank: Arc<Bank>, bank: Arc<Bank>,
opts: &ProcessOptions, opts: &ProcessOptions,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> { ) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError>
{
info!("processing ledger from root slot {}...", bank.slot()); info!("processing ledger from root slot {}...", bank.slot());
let allocated = thread_mem_usage::Allocatedp::default(); let allocated = thread_mem_usage::Allocatedp::default();
let initial_allocation = allocated.get(); let initial_allocation = allocated.get();
@ -307,13 +309,13 @@ pub fn process_blocktree_from_root(
genesis_config.operating_mode, genesis_config.operating_mode,
)); ));
blocktree blockstore
.set_roots(&[start_slot]) .set_roots(&[start_slot])
.expect("Couldn't set root slot on startup"); .expect("Couldn't set root slot on startup");
let meta = blocktree.meta(start_slot).unwrap(); let meta = blockstore.meta(start_slot).unwrap();
// Iterate and replay slots from blocktree starting from `start_slot` // Iterate and replay slots from blockstore starting from `start_slot`
let (bank_forks, bank_forks_info, leader_schedule_cache) = { let (bank_forks, bank_forks_info, leader_schedule_cache) = {
if let Some(meta) = meta { if let Some(meta) = meta {
let epoch_schedule = bank.epoch_schedule(); let epoch_schedule = bank.epoch_schedule();
@ -324,7 +326,7 @@ pub fn process_blocktree_from_root(
let fork_info = process_pending_slots( let fork_info = process_pending_slots(
&bank, &bank,
&meta, &meta,
blocktree, blockstore,
&mut leader_schedule_cache, &mut leader_schedule_cache,
&mut rooted_path, &mut rooted_path,
opts, opts,
@ -332,13 +334,13 @@ pub fn process_blocktree_from_root(
let (banks, bank_forks_info): (Vec<_>, Vec<_>) = let (banks, bank_forks_info): (Vec<_>, Vec<_>) =
fork_info.into_iter().map(|(_, v)| v).unzip(); fork_info.into_iter().map(|(_, v)| v).unzip();
if banks.is_empty() { if banks.is_empty() {
return Err(BlocktreeProcessorError::NoValidForksFound); return Err(BlockstoreProcessorError::NoValidForksFound);
} }
let bank_forks = BankForks::new_from_banks(&banks, rooted_path); let bank_forks = BankForks::new_from_banks(&banks, rooted_path);
(bank_forks, bank_forks_info, leader_schedule_cache) (bank_forks, bank_forks_info, leader_schedule_cache)
} else { } else {
// If there's no meta for the input `start_slot`, then we started from a snapshot // If there's no meta for the input `start_slot`, then we started from a snapshot
// so there's no point in processing the rest of the blocktree, which // so there's no point in processing the rest of the blockstore, which
// should be empty past this point. // should be empty past this point.
let bfi = BankForksInfo { let bfi = BankForksInfo {
bank_slot: start_slot, bank_slot: start_slot,
@ -369,7 +371,7 @@ fn verify_and_process_slot_entries(
entries: &[Entry], entries: &[Entry],
last_entry_hash: Hash, last_entry_hash: Hash,
opts: &ProcessOptions, opts: &ProcessOptions,
) -> result::Result<Hash, BlocktreeProcessorError> { ) -> result::Result<Hash, BlockstoreProcessorError> {
assert!(!entries.is_empty()); assert!(!entries.is_empty());
if opts.poh_verify { if opts.poh_verify {
@ -409,7 +411,7 @@ fn verify_and_process_slot_entries(
bank.slot(), bank.slot(),
err err
); );
BlocktreeProcessorError::InvalidTransaction BlockstoreProcessorError::InvalidTransaction
})?; })?;
Ok(entries.last().unwrap().hash) Ok(entries.last().unwrap().hash)
@ -418,15 +420,15 @@ fn verify_and_process_slot_entries(
// Special handling required for processing the entries in slot 0 // Special handling required for processing the entries in slot 0
fn process_bank_0( fn process_bank_0(
bank0: &Arc<Bank>, bank0: &Arc<Bank>,
blocktree: &Blocktree, blockstore: &Blockstore,
opts: &ProcessOptions, opts: &ProcessOptions,
) -> result::Result<(), BlocktreeProcessorError> { ) -> result::Result<(), BlockstoreProcessorError> {
assert_eq!(bank0.slot(), 0); assert_eq!(bank0.slot(), 0);
// Fetch all entries for this slot // Fetch all entries for this slot
let entries = blocktree.get_slot_entries(0, 0, None).map_err(|err| { let entries = blockstore.get_slot_entries(0, 0, None).map_err(|err| {
warn!("Failed to load entries for slot 0, err: {:?}", err); warn!("Failed to load entries for slot 0, err: {:?}", err);
BlocktreeProcessorError::FailedToLoadEntries BlockstoreProcessorError::FailedToLoadEntries
})?; })?;
verify_and_process_slot_entries(bank0, &entries, bank0.last_blockhash(), opts) verify_and_process_slot_entries(bank0, &entries, bank0.last_blockhash(), opts)
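The slot-0 bootstrap followed by per-slot replay, condensed verbatim from test_process_blockstore_from_root below:

// Replay slot 0 on the genesis bank, then verify and replay slot 1 on a
// child bank, as the test does.
process_bank_0(&bank0, &blockstore, &opts).unwrap();
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot1_entries = blockstore.get_slot_entries(1, 0, None).unwrap();
verify_and_process_slot_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts)
    .unwrap();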
@ -442,11 +444,11 @@ fn process_bank_0(
fn process_next_slots( fn process_next_slots(
bank: &Arc<Bank>, bank: &Arc<Bank>,
meta: &SlotMeta, meta: &SlotMeta,
blocktree: &Blocktree, blockstore: &Blockstore,
leader_schedule_cache: &LeaderScheduleCache, leader_schedule_cache: &LeaderScheduleCache,
pending_slots: &mut Vec<(SlotMeta, Arc<Bank>, Hash)>, pending_slots: &mut Vec<(SlotMeta, Arc<Bank>, Hash)>,
fork_info: &mut HashMap<u64, (Arc<Bank>, BankForksInfo)>, fork_info: &mut HashMap<u64, (Arc<Bank>, BankForksInfo)>,
) -> result::Result<(), BlocktreeProcessorError> { ) -> result::Result<(), BlockstoreProcessorError> {
if let Some(parent) = bank.parent() { if let Some(parent) = bank.parent() {
fork_info.remove(&parent.slot()); fork_info.remove(&parent.slot());
} }
@ -461,15 +463,15 @@ fn process_next_slots(
// This is a fork point if there are multiple children, create a new child bank for each fork // This is a fork point if there are multiple children, create a new child bank for each fork
for next_slot in &meta.next_slots { for next_slot in &meta.next_slots {
let next_meta = blocktree let next_meta = blockstore
.meta(*next_slot) .meta(*next_slot)
.map_err(|err| { .map_err(|err| {
warn!("Failed to load meta for slot {}: {:?}", next_slot, err); warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
BlocktreeProcessorError::FailedToLoadMeta BlockstoreProcessorError::FailedToLoadMeta
})? })?
.unwrap(); .unwrap();
// Only process full slots in blocktree_processor, replay_stage // Only process full slots in blockstore_processor, replay_stage
// handles any partials // handles any partials
if next_meta.is_full() { if next_meta.is_full() {
let allocated = thread_mem_usage::Allocatedp::default(); let allocated = thread_mem_usage::Allocatedp::default();
@ -497,16 +499,16 @@ fn process_next_slots(
Ok(()) Ok(())
} }
// Iterate through blocktree processing slots starting from the root slot pointed to by the // Iterate through blockstore processing slots starting from the root slot pointed to by the
// given `meta` // given `meta`
fn process_pending_slots( fn process_pending_slots(
root_bank: &Arc<Bank>, root_bank: &Arc<Bank>,
root_meta: &SlotMeta, root_meta: &SlotMeta,
blocktree: &Blocktree, blockstore: &Blockstore,
leader_schedule_cache: &mut LeaderScheduleCache, leader_schedule_cache: &mut LeaderScheduleCache,
rooted_path: &mut Vec<u64>, rooted_path: &mut Vec<u64>,
opts: &ProcessOptions, opts: &ProcessOptions,
) -> result::Result<HashMap<u64, (Arc<Bank>, BankForksInfo)>, BlocktreeProcessorError> { ) -> result::Result<HashMap<u64, (Arc<Bank>, BankForksInfo)>, BlockstoreProcessorError> {
let mut fork_info = HashMap::new(); let mut fork_info = HashMap::new();
let mut last_status_report = Instant::now(); let mut last_status_report = Instant::now();
let mut pending_slots = vec![]; let mut pending_slots = vec![];
@ -514,7 +516,7 @@ fn process_pending_slots(
process_next_slots( process_next_slots(
root_bank, root_bank,
root_meta, root_meta,
blocktree, blockstore,
leader_schedule_cache, leader_schedule_cache,
&mut pending_slots, &mut pending_slots,
&mut fork_info, &mut fork_info,
@ -535,11 +537,11 @@ fn process_pending_slots(
let allocated = thread_mem_usage::Allocatedp::default(); let allocated = thread_mem_usage::Allocatedp::default();
let initial_allocation = allocated.get(); let initial_allocation = allocated.get();
if process_single_slot(blocktree, &bank, &last_entry_hash, opts).is_err() { if process_single_slot(blockstore, &bank, &last_entry_hash, opts).is_err() {
continue; continue;
} }
if blocktree.is_root(slot) { if blockstore.is_root(slot) {
let parents = bank.parents().into_iter().map(|b| b.slot()).rev().skip(1); let parents = bank.parents().into_iter().map(|b| b.slot()).rev().skip(1);
let parents: Vec<_> = parents.collect(); let parents: Vec<_> = parents.collect();
rooted_path.extend(parents); rooted_path.extend(parents);
@ -565,7 +567,7 @@ fn process_pending_slots(
process_next_slots( process_next_slots(
&bank, &bank,
&meta, &meta,
blocktree, blockstore,
leader_schedule_cache, leader_schedule_cache,
&mut pending_slots, &mut pending_slots,
&mut fork_info, &mut fork_info,
@ -578,17 +580,17 @@ fn process_pending_slots(
// Processes and replays the contents of a single slot, returns Error // Processes and replays the contents of a single slot, returns Error
// if it fails to play the slot // if it fails to play the slot
fn process_single_slot( fn process_single_slot(
blocktree: &Blocktree, blockstore: &Blockstore,
bank: &Arc<Bank>, bank: &Arc<Bank>,
last_entry_hash: &Hash, last_entry_hash: &Hash,
opts: &ProcessOptions, opts: &ProcessOptions,
) -> result::Result<(), BlocktreeProcessorError> { ) -> result::Result<(), BlockstoreProcessorError> {
let slot = bank.slot(); let slot = bank.slot();
// Fetch all entries for this slot // Fetch all entries for this slot
let entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| { let entries = blockstore.get_slot_entries(slot, 0, None).map_err(|err| {
warn!("Failed to load entries for slot {}: {:?}", slot, err); warn!("Failed to load entries for slot {}: {:?}", slot, err);
BlocktreeProcessorError::FailedToLoadEntries BlockstoreProcessorError::FailedToLoadEntries
})?; })?;
// If this errors with a fatal error, should mark the slot as dead so // If this errors with a fatal error, should mark the slot as dead so
@ -634,8 +636,8 @@ pub fn send_transaction_status_batch(
} }
// used for tests only // used for tests only
pub fn fill_blocktree_slot_with_ticks( pub fn fill_blockstore_slot_with_ticks(
blocktree: &Blocktree, blockstore: &Blockstore,
ticks_per_slot: u64, ticks_per_slot: u64,
slot: u64, slot: u64,
parent_slot: u64, parent_slot: u64,
@ -647,7 +649,7 @@ pub fn fill_blocktree_slot_with_ticks(
let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash); let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
let last_entry_hash = entries.last().unwrap().hash; let last_entry_hash = entries.last().unwrap().hash;
blocktree blockstore
.write_entries( .write_entries(
slot, slot,
0, 0,
@ -688,7 +690,7 @@ pub mod tests {
use std::sync::RwLock; use std::sync::RwLock;
#[test] #[test]
fn test_process_blocktree_with_missing_hashes() { fn test_process_blockstore_with_missing_hashes() {
solana_logger::setup(); solana_logger::setup();
let hashes_per_tick = 2; let hashes_per_tick = 2;
@ -699,14 +701,14 @@ pub mod tests {
let ticks_per_slot = genesis_config.ticks_per_slot; let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
let parent_slot = 0; let parent_slot = 0;
let slot = 1; let slot = 1;
let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash); let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
assert_matches!( assert_matches!(
blocktree.write_entries( blockstore.write_entries(
slot, slot,
0, 0,
0, 0,
@ -720,9 +722,9 @@ pub mod tests {
Ok(_) Ok(_)
); );
let (_bank_forks, bank_forks_info, _) = process_blocktree( let (_bank_forks, bank_forks_info, _) = process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
Vec::new(), Vec::new(),
ProcessOptions { ProcessOptions {
poh_verify: true, poh_verify: true,
@ -734,7 +736,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_with_invalid_slot_tick_count() { fn test_process_blockstore_with_invalid_slot_tick_count() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -742,14 +744,14 @@ pub mod tests {
// Create a new ledger with slot 0 full of ticks // Create a new ledger with slot 0 full of ticks
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
// Write slot 1 with one tick missing // Write slot 1 with one tick missing
let parent_slot = 0; let parent_slot = 0;
let slot = 1; let slot = 1;
let entries = create_ticks(ticks_per_slot - 1, 0, blockhash); let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
assert_matches!( assert_matches!(
blocktree.write_entries( blockstore.write_entries(
slot, slot,
0, 0,
0, 0,
@ -764,9 +766,9 @@ pub mod tests {
); );
// Should return slot 0, the last slot on the fork that is valid // Should return slot 0, the last slot on the fork that is valid
let (_bank_forks, bank_forks_info, _) = process_blocktree( let (_bank_forks, bank_forks_info, _) = process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
Vec::new(), Vec::new(),
ProcessOptions { ProcessOptions {
poh_verify: true, poh_verify: true,
@ -778,11 +780,11 @@ pub mod tests {
// Write slot 2 fully // Write slot 2 fully
let _last_slot2_entry_hash = let _last_slot2_entry_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
let (_bank_forks, bank_forks_info, _) = process_blocktree( let (_bank_forks, bank_forks_info, _) = process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
Vec::new(), Vec::new(),
ProcessOptions { ProcessOptions {
poh_verify: true, poh_verify: true,
@ -791,12 +793,12 @@ pub mod tests {
) )
.unwrap(); .unwrap();
// One valid fork, one bad fork. process_blocktree() should only return the valid fork // One valid fork, one bad fork. process_blockstore() should only return the valid fork
assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 2 }]); assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 2 }]);
} }
#[test] #[test]
fn test_process_blocktree_with_slot_with_trailing_entry() { fn test_process_blockstore_with_slot_with_trailing_entry() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { let GenesisConfigInfo {
@ -807,7 +809,7 @@ pub mod tests {
let ticks_per_slot = genesis_config.ticks_per_slot; let ticks_per_slot = genesis_config.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut entries = create_ticks(ticks_per_slot, 0, blockhash); let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
let trailing_entry = { let trailing_entry = {
@ -817,12 +819,12 @@ pub mod tests {
}; };
entries.push(trailing_entry); entries.push(trailing_entry);
// Tricks blocktree into writing the trailing entry by lying that there is one more tick // Tricks blockstore into writing the trailing entry by lying that there is one more tick
// per slot. // per slot.
let parent_slot = 0; let parent_slot = 0;
let slot = 1; let slot = 1;
assert_matches!( assert_matches!(
blocktree.write_entries( blockstore.write_entries(
slot, slot,
0, 0,
0, 0,
@ -841,19 +843,19 @@ pub mod tests {
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (_bank_forks, bank_forks_info, _) = let (_bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 0 }]); assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 0 }]);
} }
#[test] #[test]
fn test_process_blocktree_with_incomplete_slot() { fn test_process_blockstore_with_incomplete_slot() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let ticks_per_slot = genesis_config.ticks_per_slot; let ticks_per_slot = genesis_config.ticks_per_slot;
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blockstore in the ledger with the following fork structure:
slot 0 (all ticks) slot 0 (all ticks)
| |
@ -868,8 +870,8 @@ pub mod tests {
let (ledger_path, mut blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, mut blockhash) = create_new_tmp_ledger!(&genesis_config);
debug!("ledger_path: {:?}", ledger_path); debug!("ledger_path: {:?}", ledger_path);
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
// Write slot 1 // Write slot 1
// slot 1, points at slot 0. Missing one tick // slot 1, points at slot 0. Missing one tick
@ -883,7 +885,7 @@ pub mod tests {
entries.pop(); entries.pop();
assert_matches!( assert_matches!(
blocktree.write_entries( blockstore.write_entries(
slot, slot,
0, 0,
0, 0,
@ -899,14 +901,14 @@ pub mod tests {
} }
// slot 2, points at slot 1 // slot 2, points at slot 1
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash);
let opts = ProcessOptions { let opts = ProcessOptions {
poh_verify: true, poh_verify: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (mut _bank_forks, bank_forks_info, _) = let (mut _bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts.clone()).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts.clone()).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!( assert_eq!(
@ -928,10 +930,10 @@ pub mod tests {
poh_verify: true, poh_verify: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
// Slot 0 should not show up in the ending bank_forks_info // Slot 0 should not show up in the ending bank_forks_info
let (mut _bank_forks, bank_forks_info, _) = let (mut _bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!( assert_eq!(
@ -943,7 +945,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_with_two_forks_and_squash() { fn test_process_blockstore_with_two_forks_and_squash() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -955,7 +957,7 @@ pub mod tests {
let mut last_entry_hash = blockhash; let mut last_entry_hash = blockhash;
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blockstore in the ledger with the following fork structure:
slot 0 slot 0
| |
@ -968,32 +970,42 @@ pub mod tests {
slot 4 <-- set_root(true) slot 4 <-- set_root(true)
*/ */
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
// Fork 1, ending at slot 3 // Fork 1, ending at slot 3
let last_slot1_entry_hash = let last_slot1_entry_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
last_entry_hash = last_entry_hash = fill_blockstore_slot_with_ticks(
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash); &blockstore,
ticks_per_slot,
2,
1,
last_slot1_entry_hash,
);
let last_fork1_entry_hash = let last_fork1_entry_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
// Fork 2, ending at slot 4 // Fork 2, ending at slot 4
let last_fork2_entry_hash = let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash); &blockstore,
ticks_per_slot,
4,
1,
last_slot1_entry_hash,
);
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_roots(&[0, 1, 4]).unwrap(); blockstore.set_roots(&[0, 1, 4]).unwrap();
let opts = ProcessOptions { let opts = ProcessOptions {
poh_verify: true, poh_verify: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root
@ -1017,7 +1029,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_with_two_forks() { fn test_process_blockstore_with_two_forks() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -1029,7 +1041,7 @@ pub mod tests {
let mut last_entry_hash = blockhash; let mut last_entry_hash = blockhash;
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blockstore in the ledger with the following fork structure:
slot 0 slot 0
| |
@ -1042,32 +1054,42 @@ pub mod tests {
slot 4 slot 4
*/ */
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
// Fork 1, ending at slot 3 // Fork 1, ending at slot 3
let last_slot1_entry_hash = let last_slot1_entry_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
last_entry_hash = last_entry_hash = fill_blockstore_slot_with_ticks(
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash); &blockstore,
ticks_per_slot,
2,
1,
last_slot1_entry_hash,
);
let last_fork1_entry_hash = let last_fork1_entry_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);
// Fork 2, ending at slot 4 // Fork 2, ending at slot 4
let last_fork2_entry_hash = let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash); &blockstore,
ticks_per_slot,
4,
1,
last_slot1_entry_hash,
);
info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash); info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash); info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
blocktree.set_roots(&[0, 1]).unwrap(); blockstore.set_roots(&[0, 1]).unwrap();
let opts = ProcessOptions { let opts = ProcessOptions {
poh_verify: true, poh_verify: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (bank_forks, mut bank_forks_info, _) = let (bank_forks, mut bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
bank_forks_info.sort_by(|a, b| a.bank_slot.cmp(&b.bank_slot)); bank_forks_info.sort_by(|a, b| a.bank_slot.cmp(&b.bank_slot));
assert_eq!(bank_forks_info.len(), 2); // There are two forks assert_eq!(bank_forks_info.len(), 2); // There are two forks
@ -1107,7 +1129,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_with_dead_slot() { fn test_process_blockstore_with_dead_slot() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -1125,16 +1147,16 @@ pub mod tests {
\ \
slot 3 slot 3
*/ */
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let slot1_blockhash = let slot1_blockhash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, slot1_blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
blocktree.set_dead_slot(2).unwrap(); blockstore.set_dead_slot(2).unwrap();
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 1, slot1_blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
let (bank_forks, bank_forks_info, _) = process_blocktree( let (bank_forks, bank_forks_info, _) = process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
Vec::new(), Vec::new(),
ProcessOptions::default(), ProcessOptions::default(),
) )
@ -1154,7 +1176,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_with_dead_child() { fn test_process_blockstore_with_dead_child() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -1172,18 +1194,18 @@ pub mod tests {
/ \ / \
slot 4 (dead) slot 3 slot 4 (dead) slot 3
*/ */
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let slot1_blockhash = let slot1_blockhash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
let slot2_blockhash = let slot2_blockhash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, slot1_blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 2, slot2_blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash);
blocktree.set_dead_slot(4).unwrap(); blockstore.set_dead_slot(4).unwrap();
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 1, slot1_blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
let (bank_forks, mut bank_forks_info, _) = process_blocktree( let (bank_forks, mut bank_forks_info, _) = process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
Vec::new(), Vec::new(),
ProcessOptions::default(), ProcessOptions::default(),
) )
@ -1229,14 +1251,14 @@ pub mod tests {
/ \ / \
slot 1 (dead) slot 2 (dead) slot 1 (dead) slot 2 (dead)
*/ */
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 0, blockhash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
blocktree.set_dead_slot(1).unwrap(); blockstore.set_dead_slot(1).unwrap();
blocktree.set_dead_slot(2).unwrap(); blockstore.set_dead_slot(2).unwrap();
let (bank_forks, bank_forks_info, _) = process_blocktree( let (bank_forks, bank_forks_info, _) = process_blockstore(
&genesis_config, &genesis_config,
&blocktree, &blockstore,
Vec::new(), Vec::new(),
ProcessOptions::default(), ProcessOptions::default(),
) )
@ -1249,7 +1271,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_epoch_boundary_root() { fn test_process_blockstore_epoch_boundary_root() {
solana_logger::setup(); solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
@ -1259,8 +1281,8 @@ pub mod tests {
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let mut last_entry_hash = blockhash; let mut last_entry_hash = blockhash;
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
// Let last_slot be the number of slots in the first two epochs // Let last_slot be the number of slots in the first two epochs
let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new()); let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new());
@ -1268,8 +1290,8 @@ pub mod tests {
// Create a single chain of slots with all indexes in the range [0, last_slot + 1] // Create a single chain of slots with all indexes in the range [0, last_slot + 1]
for i in 1..=last_slot + 1 { for i in 1..=last_slot + 1 {
last_entry_hash = fill_blocktree_slot_with_ticks( last_entry_hash = fill_blockstore_slot_with_ticks(
&blocktree, &blockstore,
ticks_per_slot, ticks_per_slot,
i, i,
i - 1, i - 1,
@ -1279,10 +1301,10 @@ pub mod tests {
// Set a root on the last slot of the last confirmed epoch // Set a root on the last slot of the last confirmed epoch
let rooted_slots: Vec<_> = (0..=last_slot).collect(); let rooted_slots: Vec<_> = (0..=last_slot).collect();
blocktree.set_roots(&rooted_slots).unwrap(); blockstore.set_roots(&rooted_slots).unwrap();
// Set a root on the next slot of the confirmed epoch // Set a root on the next slot of the confirmed epoch
blocktree.set_roots(&[last_slot + 1]).unwrap(); blockstore.set_roots(&[last_slot + 1]).unwrap();
// Check that we can properly restart the ledger / leader scheduler doesn't fail // Check that we can properly restart the ledger / leader scheduler doesn't fail
let opts = ProcessOptions { let opts = ProcessOptions {
@ -1290,7 +1312,7 @@ pub mod tests {
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(bank_forks_info.len(), 1); // There is one fork assert_eq!(bank_forks_info.len(), 1); // There is one fork
assert_eq!( assert_eq!(
@ -1418,9 +1440,9 @@ pub mod tests {
)); ));
let last_blockhash = entries.last().unwrap().hash; let last_blockhash = entries.last().unwrap().hash;
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
blocktree blockstore
.write_entries( .write_entries(
1, 1,
0, 0,
@ -1438,7 +1460,7 @@ pub mod tests {
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!(bank_forks.root(), 0); assert_eq!(bank_forks.root(), 0);
@ -1461,13 +1483,13 @@ pub mod tests {
genesis_config.ticks_per_slot = 1; genesis_config.ticks_per_slot = 1;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions { let opts = ProcessOptions {
poh_verify: true, poh_verify: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 0 }); assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 0 });
@ -1480,12 +1502,12 @@ pub mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions { let opts = ProcessOptions {
override_num_threads: Some(1), override_num_threads: Some(1),
..ProcessOptions::default() ..ProcessOptions::default()
}; };
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
PAR_THREAD_POOL.with(|pool| { PAR_THREAD_POOL.with(|pool| {
assert_eq!(pool.borrow().current_num_threads(), 1); assert_eq!(pool.borrow().current_num_threads(), 1);
}); });
@ -1496,13 +1518,13 @@ pub mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions { let opts = ProcessOptions {
full_leader_cache: true, full_leader_cache: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
let (_bank_forks, _bank_forks_info, cached_leader_schedule) = let (_bank_forks, _bank_forks_info, cached_leader_schedule) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(cached_leader_schedule.max_schedules(), std::usize::MAX); assert_eq!(cached_leader_schedule.max_schedules(), std::usize::MAX);
} }
@ -1514,8 +1536,8 @@ pub mod tests {
.. ..
} = create_genesis_config(100); } = create_genesis_config(100);
let (ledger_path, last_entry_hash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, last_entry_hash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = let blockstore =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger"); Blockstore::open(&ledger_path).expect("Expected to successfully open database ledger");
let blockhash = genesis_config.hash(); let blockhash = genesis_config.hash();
let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()]; let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()];
@ -1531,7 +1553,7 @@ pub mod tests {
0, 0,
last_entry_hash, last_entry_hash,
)); ));
blocktree blockstore
.write_entries( .write_entries(
1, 1,
0, 0,
@ -1562,7 +1584,7 @@ pub mod tests {
entry_callback: Some(entry_callback), entry_callback: Some(entry_callback),
..ProcessOptions::default() ..ProcessOptions::default()
}; };
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap(); process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
assert_eq!(*callback_counter.write().unwrap(), 2); assert_eq!(*callback_counter.write().unwrap(), 2);
} }
@ -2183,7 +2205,7 @@ pub mod tests {
} }
#[test] #[test]
fn test_process_blocktree_from_root() { fn test_process_blockstore_from_root() {
let GenesisConfigInfo { let GenesisConfigInfo {
mut genesis_config, .. mut genesis_config, ..
} = create_genesis_config(123); } = create_genesis_config(123);
@ -2191,10 +2213,10 @@ pub mod tests {
let ticks_per_slot = 1; let ticks_per_slot = 1;
genesis_config.ticks_per_slot = ticks_per_slot; genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config); let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blockstore = Blockstore::open(&ledger_path).unwrap();
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blockstore in the ledger with the following fork structure:
slot 0 (all ticks) slot 0 (all ticks)
| |
@ -2214,9 +2236,9 @@ pub mod tests {
let mut last_hash = blockhash; let mut last_hash = blockhash;
for i in 0..6 { for i in 0..6 {
last_hash = last_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, i + 1, i, last_hash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
} }
blocktree.set_roots(&[3, 5]).unwrap(); blockstore.set_roots(&[3, 5]).unwrap();
// Set up bank1 // Set up bank1
let bank0 = Arc::new(Bank::new(&genesis_config)); let bank0 = Arc::new(Bank::new(&genesis_config));
@ -2224,16 +2246,16 @@ pub mod tests {
poh_verify: true, poh_verify: true,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
process_bank_0(&bank0, &blocktree, &opts).unwrap(); process_bank_0(&bank0, &blockstore, &opts).unwrap();
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot1_entries = blocktree.get_slot_entries(1, 0, None).unwrap(); let slot1_entries = blockstore.get_slot_entries(1, 0, None).unwrap();
verify_and_process_slot_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts) verify_and_process_slot_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts)
.unwrap(); .unwrap();
bank1.squash(); bank1.squash();
// Test process_blocktree_from_root() from slot 1 onwards // Test process_blockstore_from_root() from slot 1 onwards
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree_from_root(&genesis_config, &blocktree, bank1, &opts).unwrap(); process_blockstore_from_root(&genesis_config, &blockstore, bank1, &opts).unwrap();
assert_eq!(bank_forks_info.len(), 1); // One fork assert_eq!(bank_forks_info.len(), 1); // One fork
assert_eq!( assert_eq!(

View File

@ -135,7 +135,7 @@ pub mod test {
use solana_sdk::clock::Slot; use solana_sdk::clock::Slot;
/// Specifies the contents of a 16-data-shred and 4-coding-shred erasure set /// Specifies the contents of a 16-data-shred and 4-coding-shred erasure set
/// Exists to be passed to `generate_blocktree_with_coding` /// Exists to be passed to `generate_blockstore_with_coding`
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct ErasureSpec { pub struct ErasureSpec {
/// Which 16-shred erasure set this represents /// Which 16-shred erasure set this represents
@ -145,7 +145,7 @@ pub mod test {
} }
/// Specifies the contents of a slot /// Specifies the contents of a slot
/// Exists to be passed to `generate_blocktree_with_coding` /// Exists to be passed to `generate_blockstore_with_coding`
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct SlotSpec { pub struct SlotSpec {
pub slot: Slot, pub slot: Slot,

View File

@ -1,5 +1,5 @@
use crate::{ use crate::{
blocktree::Blocktree, blockstore::Blockstore,
leader_schedule::{FixedSchedule, LeaderSchedule}, leader_schedule::{FixedSchedule, LeaderSchedule},
leader_schedule_utils, leader_schedule_utils,
}; };
@ -105,7 +105,7 @@ impl LeaderScheduleCache {
pubkey: &Pubkey, pubkey: &Pubkey,
mut current_slot: Slot, mut current_slot: Slot,
bank: &Bank, bank: &Bank,
blocktree: Option<&Blocktree>, blockstore: Option<&Blockstore>,
) -> Option<(Slot, Slot)> { ) -> Option<(Slot, Slot)> {
let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1); let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
let mut first_slot = None; let mut first_slot = None;
@ -132,8 +132,8 @@ impl LeaderScheduleCache {
for i in start_index..bank.get_slots_in_epoch(epoch) { for i in start_index..bank.get_slots_in_epoch(epoch) {
current_slot += 1; current_slot += 1;
if *pubkey == leader_schedule[i] { if *pubkey == leader_schedule[i] {
if let Some(blocktree) = blocktree { if let Some(blockstore) = blockstore {
if let Some(meta) = blocktree.meta(current_slot).unwrap() { if let Some(meta) = blockstore.meta(current_slot).unwrap() {
// We have already sent a shred for this slot, so skip it // We have already sent a shred for this slot, so skip it
if meta.received > 0 { if meta.received > 0 {
continue; continue;
@ -255,7 +255,7 @@ impl LeaderScheduleCache {
mod tests { mod tests {
use super::*; use super::*;
use crate::{ use crate::{
blocktree::make_slot_entries, blockstore::make_slot_entries,
genesis_utils::{ genesis_utils::{
create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo, create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
BOOTSTRAP_LEADER_LAMPORTS, BOOTSTRAP_LEADER_LAMPORTS,
@ -424,7 +424,7 @@ mod tests {
} }
#[test] #[test]
fn test_next_leader_slot_blocktree() { fn test_next_leader_slot_blockstore() {
let pubkey = Pubkey::new_rand(); let pubkey = Pubkey::new_rand();
let mut genesis_config = create_genesis_config_with_leader( let mut genesis_config = create_genesis_config_with_leader(
BOOTSTRAP_LEADER_LAMPORTS, BOOTSTRAP_LEADER_LAMPORTS,
@ -438,8 +438,9 @@ mod tests {
let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
{ {
let blocktree = Arc::new( let blockstore = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
); );
assert_eq!( assert_eq!(
@ -449,7 +450,7 @@ mod tests {
// Check that the next leader slot after 0 is slot 1 // Check that the next leader slot after 0 is slot 1
assert_eq!( assert_eq!(
cache cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)) .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore))
.unwrap() .unwrap()
.0, .0,
1 1
@ -458,10 +459,10 @@ mod tests {
// Write a shred into slot 2 that chains to slot 1, // Write a shred into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped // but slot 1 is empty so should not be skipped
let (shreds, _) = make_slot_entries(2, 1, 1); let (shreds, _) = make_slot_entries(2, 1, 1);
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!( assert_eq!(
cache cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)) .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore))
.unwrap() .unwrap()
.0, .0,
1 1
@ -471,10 +472,10 @@ mod tests {
let (shreds, _) = make_slot_entries(1, 0, 1); let (shreds, _) = make_slot_entries(1, 0, 1);
// Check that slot 1 and 2 are skipped // Check that slot 1 and 2 are skipped
blocktree.insert_shreds(shreds, None, false).unwrap(); blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!( assert_eq!(
cache cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)) .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore))
.unwrap() .unwrap()
.0, .0,
3 3
@ -486,7 +487,7 @@ mod tests {
&pubkey, &pubkey,
2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2 2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank, &bank,
Some(&blocktree) Some(&blockstore)
), ),
None None
); );
@ -496,12 +497,12 @@ mod tests {
&Pubkey::new_rand(), // not in leader_schedule &Pubkey::new_rand(), // not in leader_schedule
0, 0,
&bank, &bank,
Some(&blocktree) Some(&blockstore)
), ),
None None
); );
} }
Blocktree::destroy(&ledger_path).unwrap(); Blockstore::destroy(&ledger_path).unwrap();
} }
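For callers updating the `Option<&Blocktree>` argument, a minimal sketch of the probe exercised above; the `leader_schedule_cache` module path and a bank whose schedule actually contains `pubkey` (as arranged by `create_genesis_config_with_leader` in the test) are assumptions.

use solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, pubkey::Pubkey};

// First slot strictly after `after_slot` where `pubkey` is scheduled to lead;
// passing Some(blockstore) lets the cache skip slots that already hold shreds.
fn next_unstarted_leader_slot(
    pubkey: &Pubkey,
    after_slot: Slot,
    bank: &Bank,
    blockstore: &Blockstore,
) -> Option<Slot> {
    let cache = LeaderScheduleCache::new_from_bank(bank);
    cache
        .next_leader_slot(pubkey, after_slot, bank, Some(blockstore))
        .map(|(first_slot, _last_slot)| first_slot)
}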
#[test] #[test]

View File

@ -2,10 +2,10 @@ pub mod bank_forks;
pub mod bank_forks_utils; pub mod bank_forks_utils;
pub mod block_error; pub mod block_error;
#[macro_use] #[macro_use]
pub mod blocktree; pub mod blockstore;
pub mod blocktree_db; pub mod blockstore_db;
mod blocktree_meta; mod blockstore_meta;
pub mod blocktree_processor; pub mod blockstore_processor;
pub mod entry; pub mod entry;
pub mod erasure; pub mod erasure;
pub mod genesis_utils; pub mod genesis_utils;

View File

@ -1,24 +1,24 @@
use crate::blocktree_db::Result; use crate::blockstore_db::Result;
use crate::{blocktree::*, blocktree_meta::SlotMeta}; use crate::{blockstore::*, blockstore_meta::SlotMeta};
use log::*; use log::*;
use solana_sdk::clock::Slot; use solana_sdk::clock::Slot;
pub struct RootedSlotIterator<'a> { pub struct RootedSlotIterator<'a> {
next_slots: Vec<Slot>, next_slots: Vec<Slot>,
prev_root: Slot, prev_root: Slot,
blocktree: &'a Blocktree, blockstore: &'a Blockstore,
} }
impl<'a> RootedSlotIterator<'a> { impl<'a> RootedSlotIterator<'a> {
pub fn new(start_slot: Slot, blocktree: &'a Blocktree) -> Result<Self> { pub fn new(start_slot: Slot, blockstore: &'a Blockstore) -> Result<Self> {
if blocktree.is_root(start_slot) { if blockstore.is_root(start_slot) {
Ok(Self { Ok(Self {
next_slots: vec![start_slot], next_slots: vec![start_slot],
prev_root: start_slot, prev_root: start_slot,
blocktree, blockstore,
}) })
} else { } else {
Err(BlocktreeError::SlotNotRooted) Err(BlockstoreError::SlotNotRooted)
} }
} }
} }
@ -31,11 +31,11 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
let (rooted_slot, slot_skipped) = self let (rooted_slot, slot_skipped) = self
.next_slots .next_slots
.iter() .iter()
.find(|x| self.blocktree.is_root(**x)) .find(|x| self.blockstore.is_root(**x))
.map(|x| (Some(*x), false)) .map(|x| (Some(*x), false))
.unwrap_or_else(|| { .unwrap_or_else(|| {
let mut iter = self let mut iter = self
.blocktree .blockstore
.rooted_slot_iterator( .rooted_slot_iterator(
// On the first iteration the root always exists, as guaranteed by the constructor, // On the first iteration the root always exists, as guaranteed by the constructor,
// so this unwrap_or_else case won't be hit. Every subsequent iteration // so this unwrap_or_else case won't be hit. Every subsequent iteration
@ -49,7 +49,7 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
let slot_meta = rooted_slot let slot_meta = rooted_slot
.map(|r| { .map(|r| {
self.blocktree self.blockstore
.meta(r) .meta(r)
.expect("Database failure, couldnt fetch SlotMeta") .expect("Database failure, couldnt fetch SlotMeta")
}) })
@ -77,17 +77,17 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::blocktree_processor::fill_blocktree_slot_with_ticks; use crate::blockstore_processor::fill_blockstore_slot_with_ticks;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
#[test] #[test]
fn test_rooted_slot_iterator() { fn test_rooted_slot_iterator() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
blocktree.set_roots(&[0]).unwrap(); blockstore.set_roots(&[0]).unwrap();
let ticks_per_slot = 5; let ticks_per_slot = 5;
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blockstore in the ledger with the following fork structure:
slot 0 slot 0
| |
@ -113,8 +113,8 @@ mod tests {
slot - 1 slot - 1
} }
}; };
let last_entry_hash = fill_blocktree_slot_with_ticks( let last_entry_hash = fill_blockstore_slot_with_ticks(
&blocktree, &blockstore,
ticks_per_slot, ticks_per_slot,
slot, slot,
parent, parent,
@ -128,16 +128,16 @@ mod tests {
// Fork 2, ending at slot 4 // Fork 2, ending at slot 4
let _ = let _ =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, fork_point, fork_hash);
// Set a root // Set a root
blocktree.set_roots(&[1, 2, 3]).unwrap(); blockstore.set_roots(&[1, 2, 3]).unwrap();
// Trying to get an iterator on a different fork will error // Trying to get an iterator on a different fork will error
assert!(RootedSlotIterator::new(4, &blocktree).is_err()); assert!(RootedSlotIterator::new(4, &blockstore).is_err());
// Trying to get an iterator on any slot on the root fork should succeed // Trying to get an iterator on any slot on the root fork should succeed
let result: Vec<_> = RootedSlotIterator::new(3, &blocktree) let result: Vec<_> = RootedSlotIterator::new(3, &blockstore)
.unwrap() .unwrap()
.into_iter() .into_iter()
.map(|(slot, _)| slot) .map(|(slot, _)| slot)
@ -145,7 +145,7 @@ mod tests {
let expected = vec![3]; let expected = vec![3];
assert_eq!(result, expected); assert_eq!(result, expected);
let result: Vec<_> = RootedSlotIterator::new(0, &blocktree) let result: Vec<_> = RootedSlotIterator::new(0, &blockstore)
.unwrap() .unwrap()
.into_iter() .into_iter()
.map(|(slot, _)| slot) .map(|(slot, _)| slot)
@ -153,17 +153,17 @@ mod tests {
let expected = vec![0, 1, 2, 3]; let expected = vec![0, 1, 2, 3];
assert_eq!(result, expected); assert_eq!(result, expected);
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
#[test] #[test]
fn test_skipping_rooted_slot_iterator() { fn test_skipping_rooted_slot_iterator() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap(); let blockstore = Blockstore::open(&blockstore_path).unwrap();
let ticks_per_slot = 5; let ticks_per_slot = 5;
/* /*
Build a blocktree in the ledger with the following fork structure: Build a blockstore in the ledger with the following fork structure:
slot 0 slot 0
| |
slot 1 slot 1
@ -188,8 +188,8 @@ mod tests {
slot - 1 slot - 1
} }
}; };
fill_blocktree_slot_with_ticks( fill_blockstore_slot_with_ticks(
&blocktree, &blockstore,
ticks_per_slot, ticks_per_slot,
slot, slot,
parent, parent,
@ -198,14 +198,14 @@ mod tests {
} }
// Set roots // Set roots
blocktree.set_roots(&[0, 1, 2, 3]).unwrap(); blockstore.set_roots(&[0, 1, 2, 3]).unwrap();
// Create one post-skip slot at 10, simulating starting from a snapshot // Create one post-skip slot at 10, simulating starting from a snapshot
// at 10 // at 10
blocktree.set_roots(&[10]).unwrap(); blockstore.set_roots(&[10]).unwrap();
// Try to get an iterator from before the skip. The post-skip slot // Try to get an iterator from before the skip. The post-skip slot
// should not return a SlotMeta // should not return a SlotMeta
let result: Vec<_> = RootedSlotIterator::new(3, &blocktree) let result: Vec<_> = RootedSlotIterator::new(3, &blockstore)
.unwrap() .unwrap()
.into_iter() .into_iter()
.map(|(slot, meta)| (slot, meta.is_some())) .map(|(slot, meta)| (slot, meta.is_some()))
@ -214,12 +214,12 @@ mod tests {
assert_eq!(result, expected); assert_eq!(result, expected);
// Create one more post-skip slot at 11 with parent equal to 10 // Create one more post-skip slot at 11 with parent equal to 10
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 11, 10, Hash::default()); fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 11, 10, Hash::default());
// Set roots // Set roots
blocktree.set_roots(&[11]).unwrap(); blockstore.set_roots(&[11]).unwrap();
let result: Vec<_> = RootedSlotIterator::new(0, &blocktree) let result: Vec<_> = RootedSlotIterator::new(0, &blockstore)
.unwrap() .unwrap()
.into_iter() .into_iter()
.map(|(slot, meta)| (slot, meta.is_some())) .map(|(slot, meta)| (slot, meta.is_some()))
@ -234,7 +234,7 @@ mod tests {
]; ];
assert_eq!(result, expected); assert_eq!(result, expected);
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
} }
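The tests above pin down the iterator's contract; as a compact usage sketch (the `rooted_slot_iterator` module path is assumed), construction fails off the root fork, and each item is a `(Slot, Option<SlotMeta>)` pair, with `None` for rooted slots whose metadata is absent, e.g. behind a snapshot.

use solana_ledger::{blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator};
use solana_sdk::clock::Slot;

// Walk the rooted chain starting at `start`, which must itself be a root.
fn rooted_slots_from(start: Slot, blockstore: &Blockstore) -> Vec<Slot> {
    RootedSlotIterator::new(start, blockstore)
        .expect("start slot must be rooted")
        .map(|(slot, _meta)| slot)
        .collect()
}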

View File

@ -1,6 +1,6 @@
use solana_ledger::entry; use solana_ledger::entry;
use solana_ledger::{ use solana_ledger::{
blocktree::{self, Blocktree}, blockstore::{self, Blockstore},
get_tmp_ledger_path, get_tmp_ledger_path,
}; };
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
@ -9,8 +9,8 @@ use std::thread::Builder;
#[test] #[test]
fn test_multiple_threads_insert_shred() { fn test_multiple_threads_insert_shred() {
let blocktree_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap()); let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
for _ in 0..100 { for _ in 0..100 {
let num_threads = 10; let num_threads = 10;
@ -20,12 +20,12 @@ fn test_multiple_threads_insert_shred() {
let threads: Vec<_> = (0..num_threads) let threads: Vec<_> = (0..num_threads)
.map(|i| { .map(|i| {
let entries = entry::create_ticks(1, 0, Hash::default()); let entries = entry::create_ticks(1, 0, Hash::default());
let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false, 0); let shreds = blockstore::entries_to_test_shreds(entries, i + 1, 0, false, 0);
let blocktree_ = blocktree.clone(); let blockstore_ = blockstore.clone();
Builder::new() Builder::new()
.name("blocktree-writer".to_string()) .name("blockstore-writer".to_string())
.spawn(move || { .spawn(move || {
blocktree_.insert_shreds(shreds, None, false).unwrap(); blockstore_.insert_shreds(shreds, None, false).unwrap();
}) })
.unwrap() .unwrap()
}) })
@ -36,16 +36,16 @@ fn test_multiple_threads_insert_shred() {
} }
// Check slot 0 has the correct children // Check slot 0 has the correct children
let mut meta0 = blocktree.meta(0).unwrap().unwrap(); let mut meta0 = blockstore.meta(0).unwrap().unwrap();
meta0.next_slots.sort(); meta0.next_slots.sort();
let expected_next_slots: Vec<_> = (1..num_threads + 1).collect(); let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
assert_eq!(meta0.next_slots, expected_next_slots); assert_eq!(meta0.next_slots, expected_next_slots);
// Delete slots for next iteration // Delete slots for next iteration
blocktree.purge_slots(0, None); blockstore.purge_slots(0, None);
} }
// Cleanup // Cleanup
drop(blocktree); drop(blockstore);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
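A single-threaded distillation of the flow above, as a sketch built only from helpers the test already uses: shred one slot of entries, insert it, and confirm the parent's SlotMeta registers the new child.

use solana_ledger::{
    blockstore::{self, Blockstore},
    entry,
    get_tmp_ledger_path,
};
use solana_sdk::hash::Hash;

fn insert_one_slot() {
    let path = get_tmp_ledger_path!();
    {
        let blockstore = Blockstore::open(&path).unwrap();
        let entries = entry::create_ticks(1, 0, Hash::default());
        // Slot 1 chaining to parent slot 0, with the same flags as the test.
        let shreds = blockstore::entries_to_test_shreds(entries, 1, 0, false, 0);
        blockstore.insert_shreds(shreds, None, false).unwrap();
        // Inserting slot 1 creates slot 0's meta and records 1 as its child.
        let meta0 = blockstore.meta(0).unwrap().unwrap();
        assert_eq!(meta0.next_slots, vec![1]);
    }
    Blockstore::destroy(&path).expect("Expected successful database destruction");
}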

View File

@ -10,7 +10,7 @@ use solana_core::{
gossip_service::discover_cluster, gossip_service::discover_cluster,
}; };
use solana_ledger::{ use solana_ledger::{
blocktree::Blocktree, blockstore::Blockstore,
entry::{Entry, EntrySlice}, entry::{Entry, EntrySlice},
}; };
use solana_sdk::{ use solana_sdk::{
@ -140,7 +140,7 @@ pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) {
} }
pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) { pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) {
let ledger = Blocktree::open(ledger_path).unwrap(); let ledger = Blockstore::open(ledger_path).unwrap();
let zeroth_slot = ledger.get_slot_entries(0, 0, None).unwrap(); let zeroth_slot = ledger.get_slot_entries(0, 0, None).unwrap();
let last_id = zeroth_slot.last().unwrap().hash; let last_id = zeroth_slot.last().unwrap().hash;
let next_slots = ledger.get_slots_since(&[0]).unwrap().remove(&0).unwrap(); let next_slots = ledger.get_slots_since(&[0]).unwrap().remove(&0).unwrap();
@ -301,19 +301,23 @@ fn poll_all_nodes_for_signature(
Ok(()) Ok(())
} }
fn get_and_verify_slot_entries(blocktree: &Blocktree, slot: Slot, last_entry: &Hash) -> Vec<Entry> { fn get_and_verify_slot_entries(
let entries = blocktree.get_slot_entries(slot, 0, None).unwrap(); blockstore: &Blockstore,
slot: Slot,
last_entry: &Hash,
) -> Vec<Entry> {
let entries = blockstore.get_slot_entries(slot, 0, None).unwrap();
assert_eq!(entries.verify(last_entry), true); assert_eq!(entries.verify(last_entry), true);
entries entries
} }
fn verify_slot_ticks( fn verify_slot_ticks(
blocktree: &Blocktree, blockstore: &Blockstore,
slot: Slot, slot: Slot,
last_entry: &Hash, last_entry: &Hash,
expected_num_ticks: Option<usize>, expected_num_ticks: Option<usize>,
) -> Hash { ) -> Hash {
let entries = get_and_verify_slot_entries(blocktree, slot, last_entry); let entries = get_and_verify_slot_entries(blockstore, slot, last_entry);
let num_ticks: usize = entries.iter().map(|entry| entry.is_tick() as usize).sum(); let num_ticks: usize = entries.iter().map(|entry| entry.is_tick() as usize).sum();
if let Some(expected_num_ticks) = expected_num_ticks { if let Some(expected_num_ticks) = expected_num_ticks {
assert_eq!(num_ticks, expected_num_ticks); assert_eq!(num_ticks, expected_num_ticks);

View File

@ -9,7 +9,7 @@ use solana_core::{
storage_stage::SLOTS_PER_TURN_TEST, storage_stage::SLOTS_PER_TURN_TEST,
validator::ValidatorConfig, validator::ValidatorConfig,
}; };
use solana_ledger::{blocktree::Blocktree, create_new_tmp_ledger, get_tmp_ledger_path}; use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger, get_tmp_ledger_path};
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster}; use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
use solana_sdk::{ use solana_sdk::{
commitment_config::CommitmentConfig, commitment_config::CommitmentConfig,
@ -62,9 +62,14 @@ fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) {
cluster_nodes[0].clone(), cluster_nodes[0].clone(),
))); )));
let path = get_tmp_ledger_path!(); let path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&path).unwrap()); let blockstore = Arc::new(Blockstore::open(&path).unwrap());
Archiver::download_from_archiver(&cluster_info, &archiver_info, &blocktree, slots_per_segment) Archiver::download_from_archiver(
.unwrap(); &cluster_info,
&archiver_info,
&blockstore,
slots_per_segment,
)
.unwrap();
} }
#[test] #[test]
@ -113,8 +118,8 @@ fn test_archiver_startup_leader_hang() {
assert!(archiver_res.is_err()); assert!(archiver_res.is_err());
} }
let _ignored = Blocktree::destroy(&leader_ledger_path); let _ignored = Blockstore::destroy(&leader_ledger_path);
let _ignored = Blocktree::destroy(&archiver_ledger_path); let _ignored = Blockstore::destroy(&archiver_ledger_path);
let _ignored = remove_dir_all(&leader_ledger_path); let _ignored = remove_dir_all(&leader_ledger_path);
let _ignored = remove_dir_all(&archiver_ledger_path); let _ignored = remove_dir_all(&archiver_ledger_path);
} }

View File

@ -10,7 +10,7 @@ use solana_core::{
validator::ValidatorConfig, validator::ValidatorConfig,
}; };
use solana_ledger::{ use solana_ledger::{
bank_forks::SnapshotConfig, blocktree::Blocktree, leader_schedule::FixedSchedule, bank_forks::SnapshotConfig, blockstore::Blockstore, leader_schedule::FixedSchedule,
leader_schedule::LeaderSchedule, snapshot_utils, leader_schedule::LeaderSchedule, snapshot_utils,
}; };
use solana_local_cluster::{ use solana_local_cluster::{
@ -67,12 +67,12 @@ fn test_ledger_cleanup_service() {
//check everyone's ledgers and make sure only ~100 slots are stored //check everyone's ledgers and make sure only ~100 slots are stored
for (_, info) in &cluster.validators { for (_, info) in &cluster.validators {
let mut slots = 0; let mut slots = 0;
let blocktree = Blocktree::open(&info.info.ledger_path).unwrap(); let blockstore = Blockstore::open(&info.info.ledger_path).unwrap();
blocktree blockstore
.slot_meta_iterator(0) .slot_meta_iterator(0)
.unwrap() .unwrap()
.for_each(|_| slots += 1); .for_each(|_| slots += 1);
// with 3 nodes, up to 3 slots can be in progress and not complete, so the max slots in blocktree should be up to 103 // with 3 nodes, up to 3 slots can be in progress and not complete, so the max slots in blockstore should be up to 103
assert!(slots <= 103, "got {}", slots); assert!(slots <= 103, "got {}", slots);
} }
} }
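The slot-count check above, factored into a helper (a sketch; `count()` stands in for the manual `for_each` accumulator).

use std::path::Path;

use solana_ledger::blockstore::Blockstore;

// Number of slots currently stored in a validator's ledger.
fn stored_slot_count(ledger_path: &Path) -> usize {
    let blockstore = Blockstore::open(ledger_path).unwrap();
    blockstore.slot_meta_iterator(0).unwrap().count()
}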
@ -674,7 +674,7 @@ fn test_snapshot_restart_tower() {
#[test] #[test]
#[serial] #[serial]
fn test_snapshots_blocktree_floor() { fn test_snapshots_blockstore_floor() {
// First set up the cluster with 1 snapshotting leader // First set up the cluster with 1 snapshotting leader
let snapshot_interval_slots = 10; let snapshot_interval_slots = 10;
let num_account_paths = 4; let num_account_paths = 4;
@ -747,10 +747,10 @@ fn test_snapshots_blocktree_floor() {
// Check the validator ledger doesn't contain any slots < slot_floor // Check the validator ledger doesn't contain any slots < slot_floor
cluster.close_preserve_ledgers(); cluster.close_preserve_ledgers();
let validator_ledger_path = &cluster.validators[&validator_id]; let validator_ledger_path = &cluster.validators[&validator_id];
let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap(); let blockstore = Blockstore::open(&validator_ledger_path.info.ledger_path).unwrap();
// Skip the zeroth slot in blocktree that the ledger is initialized with // Skip the zeroth slot in blockstore that the ledger is initialized with
let (first_slot, _) = blocktree.slot_meta_iterator(1).unwrap().next().unwrap(); let (first_slot, _) = blockstore.slot_meta_iterator(1).unwrap().next().unwrap();
assert_eq!(first_slot, slot_floor); assert_eq!(first_slot, slot_floor);
} }
@ -932,7 +932,7 @@ fn test_no_voting() {
cluster.close_preserve_ledgers(); cluster.close_preserve_ledgers();
let leader_pubkey = cluster.entry_point_info.id; let leader_pubkey = cluster.entry_point_info.id;
let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone(); let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone();
let ledger = Blocktree::open(&ledger_path).unwrap(); let ledger = Blockstore::open(&ledger_path).unwrap();
for i in 0..2 * VOTE_THRESHOLD_DEPTH { for i in 0..2 * VOTE_THRESHOLD_DEPTH {
let meta = ledger.meta(i as u64).unwrap().unwrap(); let meta = ledger.meta(i as u64).unwrap().unwrap();
let parent = meta.parent_slot; let parent = meta.parent_slot;

View File

@ -4062,7 +4062,7 @@
"hide": false, "hide": false,
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "default", "policy": "default",
"query": "SELECT host_id,error FROM \"$testnet\".\"autogen\".\"blocktree_error\" WHERE $timeFilter ORDER BY time DESC ", "query": "SELECT host_id,error FROM \"$testnet\".\"autogen\".\"blockstore_error\" WHERE $timeFilter ORDER BY time DESC ",
"rawQuery": true, "rawQuery": true,
"refId": "A", "refId": "A",
"resultFormat": "table", "resultFormat": "table",
@ -4083,7 +4083,7 @@
"tags": [] "tags": []
} }
], ],
"title": "Unexpected Blocktree Errors", "title": "Unexpected Blockstore Errors",
"transform": "table", "transform": "table",
"type": "table" "type": "table"
}, },
@ -6693,7 +6693,7 @@
"measurement": "cluster_info-vote-count", "measurement": "cluster_info-vote-count",
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "autogen", "policy": "autogen",
"query": "SELECT sum(\"recovered\") AS \"recovered\" FROM \"$testnet\".\"autogen\".\"blocktree-erasure\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval) FILL(0)", "query": "SELECT sum(\"recovered\") AS \"recovered\" FROM \"$testnet\".\"autogen\".\"blockstore-erasure\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval) FILL(0)",
"rawQuery": true, "rawQuery": true,
"refId": "B", "refId": "B",
"resultFormat": "time_series", "resultFormat": "time_series",