Fix doc warnings (#25953)

This commit is contained in:
Brian Anderson 2022-06-14 22:55:08 -05:00 committed by GitHub
parent ae37359b6b
commit db9004bd0f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 15 additions and 9 deletions

View File

@ -191,20 +191,22 @@ impl LedgerCleanupService {
/// already has fewer than `max_ledger_shreds`; otherwise, the cleanup will
/// purge enough slots to get the ledger size below `max_ledger_shreds`.
///
/// [`new_root_receiver`]: signal receiver which contains the information
/// # Arguments
///
/// - `new_root_receiver`: signal receiver which contains the information
/// about what `Slot` is the current root.
/// [`max_ledger_shreds`]: the number of shreds to keep since the new root.
/// [`last_purge_slot`]: both an input and an output parameter indicating
/// - `max_ledger_shreds`: the number of shreds to keep since the new root.
/// - `last_purge_slot`: both an input and an output parameter indicating
/// the id of the last purged slot. As an input parameter, it works
/// together with `purge_interval` on whether it is too early to perform
/// ledger cleanup. As an output parameter, it will be updated if this
/// function actually performs the ledger cleanup.
/// [`purge_interval`]: the minimum slot interval between two ledger
/// - `purge_interval`: the minimum slot interval between two ledger
/// cleanups. When the root derived from `new_root_receiver` minus
/// `last_purge_slot` is fewer than `purge_interval`, the function will
/// simply return `Ok` without actually running the ledger cleanup.
/// In this case, `purge_interval` will remain unchanged.
/// [`last_compact_slot`]: an output value which indicates the most recent
/// - `last_compact_slot`: an output value which indicates the most recent
/// slot which has been cleaned up after this call. If this parameter is
/// updated after this function call, it means the ledger cleanup has
/// been performed.

View File

@ -413,8 +413,8 @@ impl Blockstore {
}
}
/// Whether to disable compaction in [`compact_storage`], which is used
/// by the ledger cleanup service and [`backup_and_clear_blockstore`].
/// Whether to disable compaction in [`Blockstore::compact_storage`], which is used
/// by the ledger cleanup service and `solana_core::validator::backup_and_clear_blockstore`.
///
/// Note that this setting is not related to the RocksDB's background
/// compaction.
@ -654,6 +654,8 @@ impl Blockstore {
/// Collects and reports [`BlockstoreRocksDbColumnFamilyMetrics`] for
/// all the column families.
///
/// [`BlockstoreRocksDbColumnFamilyMetrics`]: crate::blockstore_metrics::BlockstoreRocksDbColumnFamilyMetrics
pub fn submit_rocksdb_cf_metrics_for_all_cfs(&self) {
self.meta_cf.submit_rocksdb_cf_metrics();
self.dead_slots_cf.submit_rocksdb_cf_metrics();

View File

@ -12,6 +12,8 @@ pub struct BlockstoreOptions {
impl Default for BlockstoreOptions {
/// The default options are the values used by [`Blockstore::open`].
///
/// [`Blockstore::open`]: crate::blockstore::Blockstore::open
fn default() -> Self {
Self {
access_type: AccessType::Primary,

View File

@ -154,7 +154,7 @@ impl PacketBatch {
///
/// # Safety
///
/// - `new_len` must be less than or equal to [`capacity()`].
/// - `new_len` must be less than or equal to [`self.capacity`].
/// - The elements at `old_len..new_len` must be initialized. Packet data
/// will likely be overwritten when populating the packet, but the meta
/// should specifically be initialized to known values.

View File

@ -56,7 +56,7 @@ pub trait MultiScalarMultiplication {
/// complications in computing the cost for the syscall. The computational costs should only
/// depend on the length of the vectors (and the curve), so it would be ideal to support
/// variable length inputs and compute the syscall cost as is done in eip-197:
/// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-197.md#gas-costs. If not, then we can
/// <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-197.md#gas-costs>. If not, then we can
/// consider bounding the length of the input and assigning worst-case cost.
fn multiscalar_multiply(
scalars: &[Self::Scalar],