Format the string literals in /validator directory (#35261)

There are lots of string literals in the /validator directory,
including many for CLI help and error messages. String literals that
push a line past 100 characters prevent rustfmt from formatting that
code properly.

This change temporarily set format_strings = true in rustfmt.toml and
then ran the formatter on the validator directory. That pass was
followed by manually tweaking several strings that were already well
crafted for readability (and within the 100-character limit).
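
As a point of reference, here is a minimal sketch (the message text is
illustrative, not quoted from the diff) of the continuation behavior the
rewrapped literals rely on: a trailing backslash in a regular Rust string
literal elides the source newline and any leading whitespace on the
following line, so a literal rewrapped to fit the 100-character limit
stays byte-identical to its single-line form.

fn main() {
    // Single-line form that would overflow a 100-column limit in context.
    let single =
        "The node is likely in repair mode. See help for --restricted-repair-only-mode.";
    // Rewrapped form: the trailing `\` removes the source newline and the
    // indentation that follows it, so no stray whitespace leaks into the value.
    let wrapped = "The node is likely in repair mode. See help for \
                   --restricted-repair-only-mode.";
    assert_eq!(single, wrapped);
}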
steviez 2024-02-21 16:12:23 -06:00 committed by GitHub
parent 531793b4be
commit 537c3d8e2c
6 changed files with 599 additions and 410 deletions


@@ -615,10 +615,9 @@ impl AdminRpc for AdminRpcImpl {
.tpu(Protocol::UDP)
.map_err(|err| {
error!(
"The public TPU address isn't being published. \
The node is likely in repair mode. \
See help for --restricted-repair-only-mode for more information. \
{err}"
"The public TPU address isn't being published. The node is likely in \
repair mode. See help for --restricted-repair-only-mode for more \
information. {err}"
);
jsonrpc_core::error::Error::internal_error()
})?;
@@ -653,10 +652,9 @@ impl AdminRpc for AdminRpcImpl {
.tpu_forwards(Protocol::UDP)
.map_err(|err| {
error!(
"The public TPU Forwards address isn't being published. \
The node is likely in repair mode. \
See help for --restricted-repair-only-mode for more information. \
{err}"
"The public TPU Forwards address isn't being published. The node is \
likely in repair mode. See help for --restricted-repair-only-mode for \
more information. {err}"
);
jsonrpc_core::error::Error::internal_error()
})?;


@@ -285,17 +285,20 @@ fn main() {
let warp_slot = if matches.is_present("warp_slot") {
Some(match matches.value_of("warp_slot") {
Some(_) => value_t_or_exit!(matches, "warp_slot", Slot),
None => {
cluster_rpc_client.as_ref().unwrap_or_else(|_| {
println!("The --url argument must be provided if --warp-slot/-w is used without an explicit slot");
exit(1);
}).get_slot()
.unwrap_or_else(|err| {
println!("Unable to get current cluster slot: {err}");
exit(1);
})
}
None => cluster_rpc_client
.as_ref()
.unwrap_or_else(|_| {
println!(
"The --url argument must be provided if --warp-slot/-w is used without an \
explicit slot"
);
exit(1);
})
.get_slot()
.unwrap_or_else(|err| {
println!("Unable to get current cluster slot: {err}");
exit(1);
}),
})
} else {
None


@@ -237,7 +237,10 @@ fn get_rpc_peers(
})
.count();
info!("Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, {rpc_peers_blacklisted} blacklisted");
info!(
"Total {rpc_peers_total} RPC nodes found. {rpc_known_peers} known, \
{rpc_peers_blacklisted} blacklisted"
);
if rpc_peers_blacklisted == rpc_peers_total {
*retry_reason = if !blacklisted_rpc_nodes.is_empty()
@@ -487,9 +490,9 @@ fn get_vetted_rpc_nodes(
Ok(rpc_node_details) => rpc_node_details,
Err(err) => {
error!(
"Failed to get RPC nodes: {err}. Consider checking system \
clock, removing `--no-port-check`, or adjusting \
`--known-validator ...` arguments as applicable"
"Failed to get RPC nodes: {err}. Consider checking system clock, removing \
`--no-port-check`, or adjusting `--known-validator ...` arguments as \
applicable"
);
exit(1);
}
@@ -905,9 +908,8 @@ fn get_snapshot_hashes_from_known_validators(
get_snapshot_hashes_for_node,
) {
debug!(
"Snapshot hashes have not been discovered from known validators. \
This likely means the gossip tables are not fully populated. \
We will sleep and retry..."
"Snapshot hashes have not been discovered from known validators. This likely means \
the gossip tables are not fully populated. We will sleep and retry..."
);
return KnownSnapshotHashes::default();
}
@@ -981,8 +983,9 @@ fn build_known_snapshot_hashes<'a>(
// hashes. So if it happens, keep the first and ignore the rest.
if is_any_same_slot_and_different_hash(&full_snapshot_hash, known_snapshot_hashes.keys()) {
warn!(
"Ignoring all snapshot hashes from node {node} since we've seen a different full snapshot hash with this slot.\
\nfull snapshot hash: {full_snapshot_hash:?}"
"Ignoring all snapshot hashes from node {node} since we've seen a different full \
snapshot hash with this slot.\
\nfull snapshot hash: {full_snapshot_hash:?}"
);
debug!(
"known full snapshot hashes: {:#?}",
@@ -1007,9 +1010,10 @@ fn build_known_snapshot_hashes<'a>(
known_incremental_snapshot_hashes.iter(),
) {
warn!(
"Ignoring incremental snapshot hash from node {node} since we've seen a different incremental snapshot hash with this slot.\
\nfull snapshot hash: {full_snapshot_hash:?}\
\nincremental snapshot hash: {incremental_snapshot_hash:?}"
"Ignoring incremental snapshot hash from node {node} since we've seen a \
different incremental snapshot hash with this slot.\
\nfull snapshot hash: {full_snapshot_hash:?}\
\nincremental snapshot hash: {incremental_snapshot_hash:?}"
);
debug!(
"known incremental snapshot hashes based on this slot: {:#?}",
@@ -1112,7 +1116,10 @@ fn retain_peer_snapshot_hashes_with_highest_incremental_snapshot_slot(
peer_snapshot_hash.snapshot_hash.incr == highest_incremental_snapshot_hash
});
trace!("retain peer snapshot hashes with highest incremental snapshot slot: {peer_snapshot_hashes:?}");
trace!(
"retain peer snapshot hashes with highest incremental snapshot slot: \
{peer_snapshot_hashes:?}"
);
}
/// Check to see if we can use our local snapshots, otherwise download newer ones.
@@ -1192,7 +1199,8 @@ fn download_snapshots(
})
{
info!(
"Incremental snapshot archive already exists locally. Skipping download. slot: {}, hash: {}",
"Incremental snapshot archive already exists locally. Skipping download. \
slot: {}, hash: {}",
incremental_snapshot_hash.0, incremental_snapshot_hash.1
);
} else {
@@ -1272,9 +1280,9 @@ fn download_snapshot(
{
warn!(
"The snapshot download is too slow, throughput: {} < min speed {} \
bytes/sec, but will NOT abort and try a different node as it is the \
only known validator and the --only-known-rpc flag is set. \
Abort count: {}, Progress detail: {:?}",
bytes/sec, but will NOT abort and try a different node as it is the \
only known validator and the --only-known-rpc flag is set. Abort \
count: {}, Progress detail: {:?}",
download_progress.last_throughput,
minimal_snapshot_download_speed,
download_abort_count,
@@ -1284,9 +1292,8 @@ fn download_snapshot(
}
}
warn!(
"The snapshot download is too slow, throughput: {} < min speed {} \
bytes/sec, will abort and try a different node. \
Abort count: {}, Progress detail: {:?}",
"The snapshot download is too slow, throughput: {} < min speed {} bytes/sec, \
will abort and try a different node. Abort count: {}, Progress detail: {:?}",
download_progress.last_throughput,
minimal_snapshot_download_speed,
download_abort_count,
@@ -1321,17 +1328,26 @@ fn should_use_local_snapshot(
incremental_snapshot_fetch,
) {
None => {
info!("Downloading a snapshot for slot {cluster_snapshot_slot} since there is not a local snapshot.");
info!(
"Downloading a snapshot for slot {cluster_snapshot_slot} since there is not a \
local snapshot."
);
false
}
Some((local_snapshot_slot, _)) => {
if local_snapshot_slot
>= cluster_snapshot_slot.saturating_sub(maximum_local_snapshot_age)
{
info!("Reusing local snapshot at slot {local_snapshot_slot} instead of downloading a snapshot for slot {cluster_snapshot_slot}.");
info!(
"Reusing local snapshot at slot {local_snapshot_slot} instead of downloading \
a snapshot for slot {cluster_snapshot_slot}."
);
true
} else {
info!("Local snapshot from slot {local_snapshot_slot} is too old. Downloading a newer snapshot for slot {cluster_snapshot_slot}.");
info!(
"Local snapshot from slot {local_snapshot_slot} is too old. Downloading a \
newer snapshot for slot {cluster_snapshot_slot}."
);
false
}
}
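
The snapshot-hash hunks above combine the two newline mechanisms: a
literal \n escape supplies the actual line break in the logged message,
while the trailing backslash only removes the source-level break and its
indentation. A minimal sketch with placeholder hash values:

fn main() {
    // The trailing `\` consumes the source newline plus the next line's
    // indentation; the `\n` escape then provides the real newline in the value.
    let msg = "full snapshot hash: 1111\
               \nincremental snapshot hash: 2222";
    assert_eq!(msg, "full snapshot hash: 1111\nincremental snapshot hash: 2222");
}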

File diff suppressed because it is too large.


@@ -154,10 +154,9 @@ impl Dashboard {
};
progress_bar.set_message(format!(
"{}{}| \
Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \
Full Snapshot Slot: {} | Incremental Snapshot Slot: {} | \
Transactions: {} | {}",
"{}{}| Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \
Full Snapshot Slot: {} | Incremental Snapshot Slot: {} | \
Transactions: {} | {}",
uptime,
if health == "ok" {
"".to_string()


@@ -218,7 +218,8 @@ fn wait_for_restart_window(
}
if !leader_schedule.is_empty() && upcoming_idle_windows.is_empty() {
return Err(format!(
"Validator has no idle window of at least {} slots. Largest idle window for epoch {} is {} slots",
"Validator has no idle window of at least {} slots. Largest idle window \
for epoch {} is {} slots",
min_idle_slots, epoch_info.epoch, max_idle_window
)
.into());
@@ -272,7 +273,8 @@ fn wait_for_restart_window(
)
}
None => format!(
"Validator will be leader soon. Next leader slot is {next_leader_slot}"
"Validator will be leader soon. Next leader slot is \
{next_leader_slot}"
),
})
}
@@ -865,11 +867,14 @@ pub fn main() {
("set-public-address", Some(subcommand_matches)) => {
let parse_arg_addr = |arg_name: &str, arg_long: &str| -> Option<SocketAddr> {
subcommand_matches.value_of(arg_name).map(|host_port| {
solana_net_utils::parse_host_port(host_port).unwrap_or_else(|err| {
eprintln!("Failed to parse --{arg_long} address. It must be in the HOST:PORT format. {err}");
exit(1);
})
solana_net_utils::parse_host_port(host_port).unwrap_or_else(|err| {
eprintln!(
"Failed to parse --{arg_long} address. It must be in the HOST:PORT \
format. {err}"
);
exit(1);
})
})
};
let tpu_addr = parse_arg_addr("tpu_addr", "tpu");
let tpu_forwards_addr = parse_arg_addr("tpu_forwards_addr", "tpu-forwards");
@@ -1081,7 +1086,8 @@ pub fn main() {
let shrink_ratio = value_t_or_exit!(matches, "accounts_shrink_ratio", f64);
if !(0.0..=1.0).contains(&shrink_ratio) {
eprintln!(
"The specified account-shrink-ratio is invalid, it must be between 0. and 1.0 inclusive: {shrink_ratio}"
"The specified account-shrink-ratio is invalid, it must be between 0. and 1.0 \
inclusive: {shrink_ratio}"
);
exit(1);
}
@@ -1285,7 +1291,8 @@ pub fn main() {
if rpc_send_batch_send_rate_ms > rpc_send_retry_rate_ms {
eprintln!(
"The specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})"
"The specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must \
be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})"
);
exit(1);
}
@@ -1294,7 +1301,7 @@ pub fn main() {
if tps > send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND {
eprintln!(
"Either the specified rpc-send-batch-size ({}) or rpc-send-batch-ms ({}) is invalid, \
'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .",
'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .",
rpc_send_batch_size,
rpc_send_batch_send_rate_ms,
send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND
@@ -1613,14 +1620,25 @@ pub fn main() {
&validator_config.snapshot_config,
validator_config.accounts_hash_interval_slots,
) {
eprintln!("Invalid snapshot configuration provided: snapshot intervals are incompatible. \
\n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if enabled) \
\n\t- full snapshot interval MUST be larger than incremental snapshot interval (if enabled) \
\nSnapshot configuration values: \
\n\tfull snapshot interval: {} \
\n\tincremental snapshot interval: {}",
if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { full_snapshot_archive_interval_slots.to_string() },
if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { "disabled".to_string() } else { incremental_snapshot_archive_interval_slots.to_string() },
eprintln!(
"Invalid snapshot configuration provided: snapshot intervals are incompatible. \
\n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if \
enabled)\
\n\t- full snapshot interval MUST be larger than incremental snapshot \
interval (if enabled)\
\nSnapshot configuration values:\
\n\tfull snapshot interval: {}\
\n\tincremental snapshot interval: {}",
if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL {
"disabled".to_string()
} else {
full_snapshot_archive_interval_slots.to_string()
},
if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL {
"disabled".to_string()
} else {
incremental_snapshot_archive_interval_slots.to_string()
},
);
exit(1);
}
@@ -1632,7 +1650,8 @@ pub fn main() {
};
if limit_ledger_size < DEFAULT_MIN_MAX_LEDGER_SHREDS {
eprintln!(
"The provided --limit-ledger-size value was too small, the minimum value is {DEFAULT_MIN_MAX_LEDGER_SHREDS}"
"The provided --limit-ledger-size value was too small, the minimum value is \
{DEFAULT_MIN_MAX_LEDGER_SHREDS}"
);
exit(1);
}