Format the string literals in /validator directory (#35261)

There are lots of string literals in the /validator directory,
including many for CLI help and error messages. Any string literal
that exceeds 100 characters cannot be wrapped by rustfmt (which does
not break string literals by default), which keeps rustfmt from
formatting that code properly.

This change temporarily set format_strings = true in rustfmt.toml,
and then ran rustfmt on the validator directory. This was followed
up by manually tweaking several strings that were already well
crafted for readability (and within the 100-character limit).
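
As a rough illustration of the effect (a minimal sketch, not taken from this
commit: the function names, the log message, and the exact formatter invocation
are made up; format_strings is an unstable rustfmt option, so it needs a
nightly toolchain), adding format_strings = true to rustfmt.toml (rustfmt's
default max_width is 100) and running e.g. cargo +nightly fmt rewraps an
over-long literal with backslash continuations. A trailing \ inside a Rust
string literal elides the newline and the leading whitespace of the next line,
so the wrapped message is identical at runtime:

    use log::info;

    // Hypothetical example, before formatting: a single literal longer than the
    // 100-character limit, which rustfmt leaves untouched by default.
    fn report_purge_failure_before(slot: u64) {
        info!("Unable to purge slot {slot}: the ledger directory is missing the required column family and the node cannot continue");
    }

    // The same call after formatting with format_strings = true: the literal is
    // split with `\` continuations, which strip the newline and the leading
    // whitespace of the following line, so the logged text is unchanged.
    fn report_purge_failure_after(slot: u64) {
        info!(
            "Unable to purge slot {slot}: the ledger directory is missing the required column \
             family and the node cannot continue"
        );
    }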
steviez 2024-02-21 16:12:23 -06:00 committed by GitHub
parent 531793b4be
commit 537c3d8e2c
6 changed files with 599 additions and 410 deletions

View File

@@ -615,10 +615,9 @@ impl AdminRpc for AdminRpcImpl {
             .tpu(Protocol::UDP)
             .map_err(|err| {
                 error!(
-                    "The public TPU address isn't being published. \
-                     The node is likely in repair mode. \
-                     See help for --restricted-repair-only-mode for more information. \
-                     {err}"
+                    "The public TPU address isn't being published. The node is likely in \
+                     repair mode. See help for --restricted-repair-only-mode for more \
+                     information. {err}"
                 );
                 jsonrpc_core::error::Error::internal_error()
             })?;
@@ -653,10 +652,9 @@ impl AdminRpc for AdminRpcImpl {
             .tpu_forwards(Protocol::UDP)
             .map_err(|err| {
                 error!(
-                    "The public TPU Forwards address isn't being published. \
-                     The node is likely in repair mode. \
-                     See help for --restricted-repair-only-mode for more information. \
-                     {err}"
+                    "The public TPU Forwards address isn't being published. The node is \
+                     likely in repair mode. See help for --restricted-repair-only-mode for \
+                     more information. {err}"
                 );
                 jsonrpc_core::error::Error::internal_error()
             })?;

View File

@@ -285,17 +285,20 @@ fn main() {
     let warp_slot = if matches.is_present("warp_slot") {
         Some(match matches.value_of("warp_slot") {
             Some(_) => value_t_or_exit!(matches, "warp_slot", Slot),
-            None => {
-                cluster_rpc_client.as_ref().unwrap_or_else(|_| {
-                        println!("The --url argument must be provided if --warp-slot/-w is used without an explicit slot");
-                        exit(1);
-
-                    }).get_slot()
-                .unwrap_or_else(|err| {
-                    println!("Unable to get current cluster slot: {err}");
-                    exit(1);
-                })
-            }
+            None => cluster_rpc_client
+                .as_ref()
+                .unwrap_or_else(|_| {
+                    println!(
+                        "The --url argument must be provided if --warp-slot/-w is used without an \
+                         explicit slot"
+                    );
+                    exit(1);
+                })
+                .get_slot()
+                .unwrap_or_else(|err| {
+                    println!("Unable to get current cluster slot: {err}");
+                    exit(1);
+                }),
         })
     } else {
         None

View File

@@ -237,7 +237,10 @@ fn get_rpc_peers(
         })
         .count();

-    info!("Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, {rpc_peers_blacklisted} blacklisted");
+    info!(
+        "Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, \
+         {rpc_peers_blacklisted} blacklisted"
+    );

     if rpc_peers_blacklisted == rpc_peers_total {
         *retry_reason = if !blacklisted_rpc_nodes.is_empty()
@@ -487,9 +490,9 @@ fn get_vetted_rpc_nodes(
             Ok(rpc_node_details) => rpc_node_details,
             Err(err) => {
                 error!(
-                    "Failed to get RPC nodes: {err}. Consider checking system \
-                     clock, removing `--no-port-check`, or adjusting \
-                     `--known-validator ...` arguments as applicable"
+                    "Failed to get RPC nodes: {err}. Consider checking system clock, removing \
+                     `--no-port-check`, or adjusting `--known-validator ...` arguments as \
+                     applicable"
                 );
                 exit(1);
             }
@@ -905,9 +908,8 @@ fn get_snapshot_hashes_from_known_validators(
         get_snapshot_hashes_for_node,
     ) {
         debug!(
-            "Snapshot hashes have not been discovered from known validators. \
-             This likely means the gossip tables are not fully populated. \
-             We will sleep and retry..."
+            "Snapshot hashes have not been discovered from known validators. This likely means \
+             the gossip tables are not fully populated. We will sleep and retry..."
         );
         return KnownSnapshotHashes::default();
     }
@@ -981,8 +983,9 @@ fn build_known_snapshot_hashes<'a>(
        // hashes. So if it happens, keep the first and ignore the rest.
        if is_any_same_slot_and_different_hash(&full_snapshot_hash, known_snapshot_hashes.keys()) {
            warn!(
-                "Ignoring all snapshot hashes from node {node} since we've seen a different full snapshot hash with this slot.\
-                 \nfull snapshot hash: {full_snapshot_hash:?}"
+                "Ignoring all snapshot hashes from node {node} since we've seen a different full \
+                 snapshot hash with this slot.\
+                 \nfull snapshot hash: {full_snapshot_hash:?}"
            );
            debug!(
                "known full snapshot hashes: {:#?}",
@@ -1007,9 +1010,10 @@ fn build_known_snapshot_hashes<'a>(
                known_incremental_snapshot_hashes.iter(),
            ) {
                warn!(
-                    "Ignoring incremental snapshot hash from node {node} since we've seen a different incremental snapshot hash with this slot.\
-                     \nfull snapshot hash: {full_snapshot_hash:?}\
-                     \nincremental snapshot hash: {incremental_snapshot_hash:?}"
+                    "Ignoring incremental snapshot hash from node {node} since we've seen a \
+                     different incremental snapshot hash with this slot.\
+                     \nfull snapshot hash: {full_snapshot_hash:?}\
+                     \nincremental snapshot hash: {incremental_snapshot_hash:?}"
                );
                debug!(
                    "known incremental snapshot hashes based on this slot: {:#?}",
@@ -1112,7 +1116,10 @@ fn retain_peer_snapshot_hashes_with_highest_incremental_snapshot_slot(
        peer_snapshot_hash.snapshot_hash.incr == highest_incremental_snapshot_hash
    });

-    trace!("retain peer snapshot hashes with highest incremental snapshot slot: {peer_snapshot_hashes:?}");
+    trace!(
+        "retain peer snapshot hashes with highest incremental snapshot slot: \
+         {peer_snapshot_hashes:?}"
+    );
 }

 /// Check to see if we can use our local snapshots, otherwise download newer ones.
@@ -1192,7 +1199,8 @@ fn download_snapshots(
            })
        {
            info!(
-                "Incremental snapshot archive already exists locally. Skipping download. slot: {}, hash: {}",
+                "Incremental snapshot archive already exists locally. Skipping download. \
+                 slot: {}, hash: {}",
                incremental_snapshot_hash.0, incremental_snapshot_hash.1
            );
        } else {
@@ -1272,9 +1280,9 @@ fn download_snapshot(
            {
                warn!(
                    "The snapshot download is too slow, throughput: {} < min speed {} \
                     bytes/sec, but will NOT abort and try a different node as it is the \
-                     only known validator and the --only-known-rpc flag is set. \
-                     Abort count: {}, Progress detail: {:?}",
+                     only known validator and the --only-known-rpc flag is set. Abort \
+                     count: {}, Progress detail: {:?}",
                    download_progress.last_throughput,
                    minimal_snapshot_download_speed,
                    download_abort_count,
@@ -1284,9 +1292,8 @@ fn download_snapshot(
                }
            }
            warn!(
-                "The snapshot download is too slow, throughput: {} < min speed {} \
-                 bytes/sec, will abort and try a different node. \
-                 Abort count: {}, Progress detail: {:?}",
+                "The snapshot download is too slow, throughput: {} < min speed {} bytes/sec, \
+                 will abort and try a different node. Abort count: {}, Progress detail: {:?}",
                download_progress.last_throughput,
                minimal_snapshot_download_speed,
                download_abort_count,
@@ -1321,17 +1328,26 @@ fn should_use_local_snapshot(
        incremental_snapshot_fetch,
    ) {
        None => {
-            info!("Downloading a snapshot for slot {cluster_snapshot_slot} since there is not a local snapshot.");
+            info!(
+                "Downloading a snapshot for slot {cluster_snapshot_slot} since there is not a \
+                 local snapshot."
+            );
            false
        }
        Some((local_snapshot_slot, _)) => {
            if local_snapshot_slot
                >= cluster_snapshot_slot.saturating_sub(maximum_local_snapshot_age)
            {
-                info!("Reusing local snapshot at slot {local_snapshot_slot} instead of downloading a snapshot for slot {cluster_snapshot_slot}.");
+                info!(
+                    "Reusing local snapshot at slot {local_snapshot_slot} instead of downloading \
+                     a snapshot for slot {cluster_snapshot_slot}."
+                );
                true
            } else {
-                info!("Local snapshot from slot {local_snapshot_slot} is too old. Downloading a newer snapshot for slot {cluster_snapshot_slot}.");
+                info!(
+                    "Local snapshot from slot {local_snapshot_slot} is too old. Downloading a \
+                     newer snapshot for slot {cluster_snapshot_slot}."
+                );
                false
            }
        }

File diff suppressed because it is too large

View File

@@ -154,10 +154,9 @@ impl Dashboard {
                };
                progress_bar.set_message(format!(
-                    "{}{}| \
-                    Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \
-                    Full Snapshot Slot: {} | Incremental Snapshot Slot: {} | \
-                    Transactions: {} | {}",
+                    "{}{}| Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \
+                     Full Snapshot Slot: {} | Incremental Snapshot Slot: {} | \
+                     Transactions: {} | {}",
                    uptime,
                    if health == "ok" {
                        "".to_string()

View File

@@ -218,7 +218,8 @@ fn wait_for_restart_window(
                    }
                    if !leader_schedule.is_empty() && upcoming_idle_windows.is_empty() {
                        return Err(format!(
-                            "Validator has no idle window of at least {} slots. Largest idle window for epoch {} is {} slots",
+                            "Validator has no idle window of at least {} slots. Largest idle window \
+                             for epoch {} is {} slots",
                            min_idle_slots, epoch_info.epoch, max_idle_window
                        )
                        .into());
@@ -272,7 +273,8 @@ fn wait_for_restart_window(
                        )
                    }
                    None => format!(
-                        "Validator will be leader soon. Next leader slot is {next_leader_slot}"
+                        "Validator will be leader soon. Next leader slot is \
+                         {next_leader_slot}"
                    ),
                })
            }
@@ -865,11 +867,14 @@ pub fn main() {
            ("set-public-address", Some(subcommand_matches)) => {
                let parse_arg_addr = |arg_name: &str, arg_long: &str| -> Option<SocketAddr> {
                    subcommand_matches.value_of(arg_name).map(|host_port| {
                        solana_net_utils::parse_host_port(host_port).unwrap_or_else(|err| {
-                            eprintln!("Failed to parse --{arg_long} address. It must be in the HOST:PORT format. {err}");
+                            eprintln!(
+                                "Failed to parse --{arg_long} address. It must be in the HOST:PORT \
+                                 format. {err}"
+                            );
                            exit(1);
                        })
                    })
                };
                let tpu_addr = parse_arg_addr("tpu_addr", "tpu");
                let tpu_forwards_addr = parse_arg_addr("tpu_forwards_addr", "tpu-forwards");
@@ -1081,7 +1086,8 @@ pub fn main() {
    let shrink_ratio = value_t_or_exit!(matches, "accounts_shrink_ratio", f64);
    if !(0.0..=1.0).contains(&shrink_ratio) {
        eprintln!(
-            "The specified account-shrink-ratio is invalid, it must be between 0. and 1.0 inclusive: {shrink_ratio}"
+            "The specified account-shrink-ratio is invalid, it must be between 0. and 1.0 \
+             inclusive: {shrink_ratio}"
        );
        exit(1);
    }
@@ -1285,7 +1291,8 @@ pub fn main() {
    if rpc_send_batch_send_rate_ms > rpc_send_retry_rate_ms {
        eprintln!(
-            "The specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})"
+            "The specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must \
+             be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})"
        );
        exit(1);
    }
@@ -1294,7 +1301,7 @@ pub fn main() {
    if tps > send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND {
        eprintln!(
            "Either the specified rpc-send-batch-size ({}) or rpc-send-batch-ms ({}) is invalid, \
-            'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .",
+             'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .",
            rpc_send_batch_size,
            rpc_send_batch_send_rate_ms,
            send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND
@@ -1613,14 +1620,25 @@ pub fn main() {
        &validator_config.snapshot_config,
        validator_config.accounts_hash_interval_slots,
    ) {
-        eprintln!("Invalid snapshot configuration provided: snapshot intervals are incompatible. \
-            \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if enabled) \
-            \n\t- full snapshot interval MUST be larger than incremental snapshot interval (if enabled) \
-            \nSnapshot configuration values: \
-            \n\tfull snapshot interval: {} \
-            \n\tincremental snapshot interval: {}",
-            if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { full_snapshot_archive_interval_slots.to_string() },
-            if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { incremental_snapshot_archive_interval_slots.to_string() },
+        eprintln!(
+            "Invalid snapshot configuration provided: snapshot intervals are incompatible. \
+             \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if \
+             enabled)\
+             \n\t- full snapshot interval MUST be larger than incremental snapshot \
+             interval (if enabled)\
+             \nSnapshot configuration values:\
+             \n\tfull snapshot interval: {}\
+             \n\tincremental snapshot interval: {}",
+            if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL {
+                "disabled".to_string()
+            } else {
+                full_snapshot_archive_interval_slots.to_string()
+            },
+            if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL {
+                "disabled".to_string()
+            } else {
+                incremental_snapshot_archive_interval_slots.to_string()
+            },
        );
        exit(1);
    }
@@ -1632,7 +1650,8 @@ pub fn main() {
    };
    if limit_ledger_size < DEFAULT_MIN_MAX_LEDGER_SHREDS {
        eprintln!(
-            "The provided --limit-ledger-size value was too small, the minimum value is {DEFAULT_MIN_MAX_LEDGER_SHREDS}"
+            "The provided --limit-ledger-size value was too small, the minimum value is \
+             {DEFAULT_MIN_MAX_LEDGER_SHREDS}"
        );
        exit(1);
    }