zebra/zebra-network/src/peer_cache_updater.rs

//! An async task that regularly updates the peer cache on disk from the current address book.

use std::{
    io,
    sync::{Arc, Mutex},
};

use chrono::Utc;
use tokio::time::sleep;

use crate::{
    constants::{DNS_LOOKUP_TIMEOUT, PEER_DISK_CACHE_UPDATE_INTERVAL},
    meta_addr::MetaAddr,
    AddressBook, BoxError, Config,
};

/// An ongoing task that regularly caches the current `address_book` to disk, based on `config`.
#[instrument(skip(config, address_book))]
pub async fn peer_cache_updater(
    config: Config,
    address_book: Arc<Mutex<AddressBook>>,
) -> Result<(), BoxError> {
    // Wait until we've queried DNS and (hopefully) sent peers to the address book.
    // Ideally we'd wait for at least one peer crawl, but that makes tests very slow.
    //
    // TODO: turn the initial sleep time into a parameter of this function,
    //       and allow it to be set in tests
    sleep(DNS_LOOKUP_TIMEOUT * 4).await;
    loop {
        // Ignore errors because updating the cache is optional.
        // Errors are already logged by the functions we're calling.
        let _ = update_peer_cache_once(&config, &address_book).await;

        sleep(PEER_DISK_CACHE_UPDATE_INTERVAL).await;
    }
}
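
// The sketch below is not part of the original file: it shows one way this
// updater could be spawned alongside the other peer set tasks. The helper name
// `spawn_peer_cache_updater` and its return type are illustrative assumptions,
// not the actual zebra-network startup code.
#[allow(dead_code)]
fn spawn_peer_cache_updater(
    config: Config,
    address_book: Arc<Mutex<AddressBook>>,
) -> tokio::task::JoinHandle<Result<(), BoxError>> {
    // The updater loops until the process exits; because caching is best-effort,
    // callers can ignore the join handle or just log its error on shutdown.
    tokio::spawn(peer_cache_updater(config, address_book))
}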

/// Caches peers from the current `address_book` to disk, based on `config`.
pub async fn update_peer_cache_once(
    config: &Config,
    address_book: &Arc<Mutex<AddressBook>>,
) -> io::Result<()> {
    let peer_list = cacheable_peers(address_book)
        .iter()
        .map(|meta_addr| meta_addr.addr)
        .collect();

    config.update_peer_cache(peer_list).await
}

/// Returns a list of cacheable peers, blocking for as short a time as possible.
fn cacheable_peers(address_book: &Arc<Mutex<AddressBook>>) -> Vec<MetaAddr> {
    // TODO: use spawn_blocking() here, if needed to handle address book mutex load
    let now = Utc::now();

    // # Concurrency
    //
    // We return from this function immediately to make sure the address book is unlocked.
    address_book
        .lock()
        .expect("unexpected panic in previous thread while accessing the address book")
        .cacheable(now)
}
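
// A minimal sketch, also not part of the original file, of a one-off cache
// write outside the long-running task (for example, during an orderly shutdown).
// The function name `flush_peer_cache_on_shutdown` is hypothetical.
#[allow(dead_code)]
async fn flush_peer_cache_on_shutdown(config: &Config, address_book: &Arc<Mutex<AddressBook>>) {
    // Best-effort, just like the loop in `peer_cache_updater`: a failed write only
    // means the on-disk cache is slightly stale, and the error is already logged
    // by the functions this calls.
    let _ = update_peer_cache_once(config, address_book).await;
}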