Remove some TODOs (#6488)

* Remove stale TODOs

* Ban TODO markers from markdown

* Scrub all TODOs from ci/ and book/
Michael Vines authored on 2019-10-21 22:25:06 -07:00; committed by GitHub
parent 1b1980ad49
commit f80a5b8c34
16 changed files with 27 additions and 94 deletions

View File

@ -17,7 +17,7 @@ The following sections outline how this architecture would work:
* The keypair is ephemeral. A new keypair is generated on node bootup. A
- new keypair might also be generated at runtime based on some TBD
+ new keypair might also be generated at runtime based on some to be determined
criteria.
@ -28,7 +28,7 @@ The following sections outline how this architecture would work:
signed by a trusted party
3. The stakeholder of the node grants ephemeral key permission to use its stake.
- This process is TBD.
+ This process is to be determined.
4. The node's untrusted, non-enclave software calls trusted enclave software
@ -40,7 +40,7 @@ The following sections outline how this architecture would work:
presented with some verifiable data to check before signing the vote.
- * The process of generating the verifiable data in untrusted space is TBD
+ * The process of generating the verifiable data in untrusted space is to be determined
### PoH Verification

View File

@ -20,7 +20,7 @@ Create a new Azure-based "queue=default" agent by running the following command:
```
$ az vm create \
--resource-group ci \
- --name XXX \
+ --name XYZ \
--image boilerplate \
--admin-username $(whoami) \
--ssh-key-value ~/.ssh/id_rsa.pub
@ -42,11 +42,11 @@ Creating a "queue=cuda" agent follows the same process but additionally:
1. When ready, ssh into the instance and start a root shell with `sudo -i`. Then
prepare it for deallocation by running:
`waagent -deprovision+user; cd /etc; ln -s ../run/systemd/resolve/stub-resolv.conf resolv.conf`
- 1. Run `az vm deallocate --resource-group ci --name XXX`
- 1. Run `az vm generalize --resource-group ci --name XXX`
- 1. Run `az image create --resource-group ci --source XXX --name boilerplate`
+ 1. Run `az vm deallocate --resource-group ci --name XYZ`
+ 1. Run `az vm generalize --resource-group ci --name XYZ`
+ 1. Run `az image create --resource-group ci --source XYZ --name boilerplate`
1. Goto the `ci` resource group in the Azure portal and remove all resources
- with the XXX name in them
+ with the XYZ name in them
## Reference
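
Taken together, the capture steps above boil down to the sequence below. This is only a consolidated sketch of the commands already listed, with XYZ standing in for whatever `--name` the agent VM was created with; the first two commands run as root inside the VM, the rest from a workstation with the Azure CLI.

```bash
# Inside the agent VM, as root: prepare it for deallocation
waagent -deprovision+user
cd /etc; ln -s ../run/systemd/resolve/stub-resolv.conf resolv.conf

# From a workstation with the Azure CLI: capture the VM as the new
# "boilerplate" image
az vm deallocate --resource-group ci --name XYZ
az vm generalize --resource-group ci --name XYZ
az image create --resource-group ci --source XYZ --name boilerplate

# Finally, remove the leftover XYZ resources from the "ci" resource group
# in the Azure portal
```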
@ -95,7 +95,7 @@ VM Instances in each group is manually adjusted.
Each Instance group has its own disk image, `ci-default-vX` and
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.
- The process to update a disk image is as follows (TODO: make this less manual):
+ The manual process to update a disk image is as follows:
1. Create a new VM Instance using the disk image to modify.
2. Once the VM boots, ssh to it and modify the disk as desired.

View File

@ -29,12 +29,6 @@ steps:
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 40
- # TODO: Fix and re-enable test-large-network.sh
- # - command: "ci/test-large-network.sh || true"
- # name: "large-network [ignored]"
- # timeout_in_minutes: 20
- # agents:
- # - "queue=large"
- wait
- trigger: "solana-secondary"
branches: "!pull/*"

View File

@ -57,7 +57,7 @@ declare useGithubIssueInsteadOf=(
#'TODO' # TODO: Uncomment this line to disable TODOs
)
- if _ git --no-pager grep -n --max-depth=0 "${useGithubIssueInsteadOf[@]/#/-e }" -- '*.rs' '*.sh'; then
+ if _ git --no-pager grep -n --max-depth=0 "${useGithubIssueInsteadOf[@]/#/-e }" -- '*.rs' '*.sh' '*.md'; then
exit 1
fi
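
The grep line above is dense: `${useGithubIssueInsteadOf[@]/#/-e }` prepends `-e ` to every entry of the banned-marker array, handing `git grep` one pattern per marker, and the newly added `'*.md'` pathspec is what extends the ban to markdown files. A minimal standalone sketch of the same mechanism, with an illustrative marker list rather than the script's real one:

```bash
#!/usr/bin/env bash
# Illustrative marker list; the real script maintains its own array.
declare markers=(
  'TBD'
  'FIXME'
)

# "${markers[@]/#/-e }" yields one "-e <marker>" word per element, i.e. one
# git grep pattern per banned marker (the space after -e means the marker
# must be preceded by a space in the source to match).
if git --no-pager grep -n "${markers[@]/#/-e }" -- '*.rs' '*.sh' '*.md'; then
  echo "Please open a GitHub issue instead of leaving a marker in the source"
  exit 1
fi
```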

View File

@ -46,7 +46,7 @@ def get_packages():
max_iterations = pow(len(dependency_graph),2)
while dependency_graph:
if max_iterations == 0:
- # TODO: Be more helpful and find the actual cycle for the user
+ # One day be more helpful and find the actual cycle for the user...
sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format(' '.join(dependency_graph.keys())))
max_iterations -= 1

View File

@ -50,8 +50,8 @@ for Cargo_toml in $Cargo_tomls; do
(
set -x
crate=$(dirname "$Cargo_toml")
- # TODO: the rocksdb package does not build with the stock rust docker image,
- # so use the solana rust docker image until this is resolved upstream
+ # The rocksdb package does not build with the stock rust docker image so use
+ # the solana rust docker image
cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues

View File

@ -46,7 +46,6 @@ beta)
;;
stable)
# Set to whatever branch 'testnet' is on.
- # TODO: Revert to $STABLE_CHANNEL for TdS
CHANNEL_BRANCH=$BETA_CHANNEL
;;
*)

View File

@ -70,9 +70,9 @@ _ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verb
# Run banking bench. Doesn't require nightly, but use since it is already built.
_ cargo +$rust_nightly run --release --manifest-path banking_bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
- # TODO: debug why solana-upload-perf takes over 30 minutes to complete.
+ # `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
+ # reason
exit 0
_ cargo +$rust_nightly run --release --package solana-upload-perf \
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" | tee "$BENCH_ARTIFACT"

View File

@ -1,41 +0,0 @@
- #!/usr/bin/env bash
- set -e
- here=$(dirname "$0")
- cd "$here"/..
- source ci/rust-version.sh stable
- export RUST_BACKTRACE=1
- rm -rf target/perf-libs
- ./fetch-perf-libs.sh
- export LD_LIBRARY_PATH=$PWD/target/perf-libs:$LD_LIBRARY_PATH
- export RUST_LOG=multinode=info
- source scripts/ulimit-n.sh
- if [[ $(sysctl -n net.core.rmem_default) -lt 1610612736 ]]; then
- echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.rmem_default=1610612736" to continue'
- exit 1
- fi
- if [[ $(sysctl -n net.core.rmem_max) -lt 1610612736 ]]; then
- echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.rmem_max=1610612736" to continue'
- exit 1
- fi
- if [[ $(sysctl -n net.core.wmem_default) -lt 1610612736 ]]; then
- echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.wmem_default=1610612736" to continue'
- exit 1
- fi
- if [[ $(sysctl -n net.core.wmem_max) -lt 1610612736 ]]; then
- echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.wmem_max=1610612736" to continue'
- exit 1
- fi
- set -x
- export SOLANA_DYNAMIC_NODES=120
- exec cargo +"$rust_stable" test --release --features=erasure test_multi_node_dynamic_network -- --ignored
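
All four sysctl guards in the removed script follow one pattern, and the wmem checks reused the rmem error text verbatim. Should an equivalent guard ever be needed again, a compact sketch with the same 1.5 GiB threshold that also names the offending knob correctly:

```bash
#!/usr/bin/env bash
set -e

# Require each UDP buffer sysctl to be at least 1.5 GiB before running the
# large-network test, mirroring the checks in the removed script.
required=1610612736
for knob in net.core.rmem_default net.core.rmem_max \
            net.core.wmem_default net.core.wmem_max; do
  if [[ $(sysctl -n "$knob") -lt $required ]]; then
    echo "Error: $knob too small, run \"sudo sysctl -w $knob=$required\" to continue"
    exit 1
  fi
done
```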

View File

@ -636,20 +636,7 @@ sanity-or-restart)
else
echo "+++ Sanity failed, updating the network"
$metricsWriteDatapoint "testnet-manager sanity-failure=1"
- # TODO: Restore attempt to restart the cluster before recreating it
- # See https://github.com/solana-labs/solana/issues/3774
- if false; then
- if start; then
- echo Update successful
- else
- echo "+++ Update failed, restarting the network"
- $metricsWriteDatapoint "testnet-manager update-failure=1"
- create-and-start
- fi
- else
- create-and-start
- fi
+ create-and-start
fi
;;
*)

View File

@ -88,14 +88,12 @@ if [[ ! -f $vote_keypair_path ]]; then
fi
if [[ -f $stake_keypair_path ]]; then
- # TODO: Add ability to add multiple stakes with this script?
echo "Error: $stake_keypair_path already exists"
exit 1
fi
if ((airdrops_enabled)); then
- declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees
- $solana_cli "${common_args[@]}" airdrop $((stake_lamports+fees)) lamports
+ $solana_cli "${common_args[@]}" airdrop "$stake_lamports" lamports
fi
$solana_keygen new -o "$stake_keypair_path"

View File

@ -377,9 +377,9 @@ EOF
)
if [[ $airdropsEnabled != true ]]; then
echo "TODO: archivers not supported without airdrops"
# TODO: need to provide the `--identity` argument to an existing system
# account with lamports in it
# If this ever becomes a problem, we need to provide the `--identity`
# argument to an existing system account with lamports in it
echo "Error: archivers not supported without airdrops"
exit 1
fi

View File

@ -66,7 +66,6 @@ local|tar|skip)
solana_cli=solana
solana_gossip=solana-gossip
solana_install=solana-install
- solana_keygen=solana-keygen
;;
*)
echo "Unknown deployment method: $deployMethod"
@ -85,11 +84,8 @@ fi
echo "+++ $sanityTargetIp: validators"
(
- # Ensure solana-cli has a keypair even though it doesn't really need one...
- # TODO: Remove when https://github.com/solana-labs/solana/issues/6375 is fixed
- $solana_keygen new --force -o temp-id.json
set -x
- $solana_cli --keypair temp-id.json --url http://"$sanityTargetIp":8899 show-validators
+ $solana_cli --url http://"$sanityTargetIp":8899 show-validators
)
echo "+++ $sanityTargetIp: node count ($numSanityNodes expected)"

View File

@ -126,7 +126,7 @@ cloud_Initialize() {
declare networkName="$1"
# ec2-provider.sh creates firewall rules programmatically, should do the same
# here.
echo "TODO: create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists"
echo "Note: one day create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists"
}
#

View File

@ -3,8 +3,8 @@ set -ex
#
# Prevent background upgrades that block |apt-get|
#
- # TODO: This approach is pretty uncompromising. An alternative solution that
- # doesn't involve deleting system files would be welcome.
+ # This approach is pretty uncompromising. An alternative solution that doesn't
+ # involve deleting system files would be welcome.
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

View File

@ -122,7 +122,7 @@ cloud_Initialize() {
declare networkName="$1"
# ec2-provider.sh creates firewall rules programmatically, should do the same
# here.
echo "TODO: create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists"
echo "Note: one day create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists"
}
#
@ -170,8 +170,8 @@ cloud_CreateInstances() {
if $enableGpu; then
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
#
- # TODO: Unfortunately this image is not public. When this becomes an issue,
- # use the stock Ubuntu 18.04 image and programmatically install CUDA after the
+ # Unfortunately this image is not public. When this becomes an issue, use
+ # the stock Ubuntu 18.04 image and programmatically install CUDA after the
# instance boots
#
imageName="ubuntu-1804-bionic-v20181029-with-cuda-10-and-cuda-9-2"