diff --git a/.travis.yml b/.travis.yml index f18039a42f..252d8cef98 100644 --- a/.travis.yml +++ b/.travis.yml @@ -106,3 +106,25 @@ jobs: script: - ../.travis/commitlint.sh - source .travis/script.sh + + # docs pull request + - name: "docs" + if: type IN (push, pull_request) + language: node_js + node_js: + - "node" + + services: + - docker + + cache: + directories: + - ~/.npm + + before_install: + - .travis/affects.sh docs/ .travis || travis_terminate 0 + - cd docs/ + - source .travis/before_install.sh + + script: + - source .travis/script.sh diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 234df514a2..bc7a52315b 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -211,12 +211,7 @@ pull_or_push_steps() { all_test_steps fi - # doc/ changes: - if affects ^docs/; then - command_step docs ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image docs/build.sh" 5 - fi - - # web3.js and explorer changes run on Travis... + # web3.js, explorer and docs changes run on Travis... } diff --git a/docs/.eslintrc b/docs/.eslintrc new file mode 100644 index 0000000000..d2204b0b8d --- /dev/null +++ b/docs/.eslintrc @@ -0,0 +1,21 @@ +{ + "env": { + "browser": true, + "node": true + }, + "parser": "babel-eslint", + "rules": { + "strict": 0, + "no-unused-vars": ["error", { "argsIgnorePattern": "^_" }], + "no-trailing-spaces": ["error", { "skipBlankLines": true }] + }, + "settings": { + "react": { + "version": "detect", // React version. "detect" automatically picks the version you have installed. + } + }, + "extends": [ + "eslint:recommended", + "plugin:react/recommended" + ] + } \ No newline at end of file diff --git a/docs/.gitattributes b/docs/.gitattributes deleted file mode 100644 index fd04e543b4..0000000000 --- a/docs/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -theme/highlight.js binary diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000000..dfd0740615 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,22 @@ +# Dependencies +/node_modules + +# Production +/build + +# Generated files +.docusaurus +.cache-loader +.vercel +/static/img/*.svg + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/docs/.travis/before_install.sh b/docs/.travis/before_install.sh new file mode 100644 index 0000000000..724311cc3a --- /dev/null +++ b/docs/.travis/before_install.sh @@ -0,0 +1,9 @@ +# |source| this file + +curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash - +sudo apt install -y nodejs + +npm install --global docusaurus-init +docusaurus-init + +npm install --global vercel diff --git a/docs/.travis/script.sh b/docs/.travis/script.sh new file mode 100644 index 0000000000..a4fee2dabc --- /dev/null +++ b/docs/.travis/script.sh @@ -0,0 +1,4 @@ +# |source| this file + +set -ex +./build.sh diff --git a/docs/README.md b/docs/README.md index 458e67756e..b3a0bdb370 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,31 +1,38 @@ -Building the Solana Docs ---- +# Docs Readme -Install dependencies, build, and test the docs: +Solana's Docs are built using [Docusaurus 2](https://v2.docusaurus.io/) with `npm`. +Static content delivery is handled using `vercel`. 
-```bash -$ brew install coreutils -$ brew install mscgen -$ cargo install svgbob_cli -$ cargo install mdbook-linkcheck -$ cargo install mdbook -$ ./build.sh +### Installation + +``` +$ npm install ``` -Run any Rust tests in the markdown: +### Local Development -```bash -$ make test +``` +$ npm run start ``` -Render markdown as HTML: +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. -```bash -$ make build +### Build +#### Local Build Testing +``` +$ npm run build ``` -Render and view the docs: +This command generates static content into the `build` directory, which can be +served using any static content hosting service. -```bash -$ make open -``` +#### CI Build Flow +The docs are built and published in Travis CI with the `docs/build.sh` script. +On each PR, the docs are built, but not published. + +In each post-commit build, docs are built and published using `vercel` to their +respective domain depending on the build branch. + + - Master branch docs are published to `edge.docs.solana.com` + - Beta branch docs are published to `beta.docs.solana.com` + - Latest release tag docs are published to `docs.solana.com` diff --git a/docs/babel.config.js b/docs/babel.config.js new file mode 100644 index 0000000000..bfd75dbdfc --- /dev/null +++ b/docs/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve("@docusaurus/core/lib/babel/preset")], +}; diff --git a/docs/book.toml b/docs/book.toml deleted file mode 100644 index a05881c58e..0000000000 --- a/docs/book.toml +++ /dev/null @@ -1,15 +0,0 @@ -[book] -title = "Solana: Blockchain Rebuilt for Scale" -authors = ["The Solana Team"] - -[build] -build-dir = "html" -create-missing = false - -[output.html] -theme = "theme" - -[output.linkcheck] -# Exclude some special links and `README.md` which causes false-positive errors -# Also, crates.io returns 404 for correct links accessed from curl and linkcheck -exclude = [ 'http://192\.168\.1\.88', 'http://localhost', 'LATEST_SOLANA_RELEASE_VERSION', 'README\.md', 'https://crates\.io' ] diff --git a/docs/build-cli-usage.sh b/docs/build-cli-usage.sh index 0c5091b685..4fb5cb6de7 100755 --- a/docs/build-cli-usage.sh +++ b/docs/build-cli-usage.sh @@ -3,6 +3,9 @@ set -e cd "$(dirname "$0")" +# shellcheck source=ci/rust-version.sh +source ../ci/rust-version.sh stable + : "${rust_stable:=}" # Pacify shellcheck usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//') diff --git a/docs/build.sh b/docs/build.sh index f610deb76f..a19c45cd02 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash -set -e +set -ex + +# shellcheck source=ci/env.sh +source ../ci/env.sh cd "$(dirname "$0")" @@ -12,6 +15,31 @@ find src -name '*.md' -a \! 
-name SUMMARY.md | fi done -mdbook --version -mdbook-linkcheck --version -make -j"$(nproc)" test +: "${rust_stable_docker_image:=}" # Pacify shellcheck + +# shellcheck source=ci/rust-version.sh +source ../ci/rust-version.sh +../ci/docker-run.sh "$rust_stable_docker_image" docs/build-cli-usage.sh +../ci/docker-run.sh "$rust_stable_docker_image" docs/convert-ascii-to-svg.sh +./set-solana-release-tag.sh + +# Build from /src into /build +npm run build + +# Deploy the /build content using vercel +if [[ -d .vercel ]]; then + rm -r .vercel +fi +./set-vercel-project-name.sh + +if [[ -n $CI ]]; then + if [[ -z $CI_PULL_REQUEST ]]; then + [[ -n $VERCEL_TOKEN ]] || { + echo "VERCEL_TOKEN is undefined. Needed for Vercel authentication." + exit 1 + } + vercel deploy . --local-config=vercel.json --confirm --token "$VERCEL_TOKEN" --prod + fi +else + vercel deploy . --local-config=vercel.json +fi diff --git a/docs/convert-ascii-to-svg.sh b/docs/convert-ascii-to-svg.sh new file mode 100755 index 0000000000..097beae8c3 --- /dev/null +++ b/docs/convert-ascii-to-svg.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Convert .bob and .msc files in docs/art to .svg files located where the +# site build will find them. + +set -e + +cd "$(dirname "$0")" +output_dir=static/img + +mkdir -p "$output_dir" + +while read -r bob_file; do + svg_file=$(basename "${bob_file%.*}".svg) + svgbob "$bob_file" --output "$output_dir/$svg_file" +done < <(find art/*.bob) + +while read -r msc_file; do + svg_file=$(basename "${msc_file%.*}".svg) + mscgen -T svg -o "$output_dir/$svg_file" -i "$msc_file" +done < <(find art/*.msc) diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js new file mode 100644 index 0000000000..a4bb124e9e --- /dev/null +++ b/docs/docusaurus.config.js @@ -0,0 +1,113 @@ +module.exports = { + title: "Solana Docs", + tagline: + "Solana is an open source project implementing a new, high-performance, permissionless blockchain.", + url: "https://docs.solana.com", + baseUrl: "/", + favicon: "img/favicon.ico", + organizationName: "solana-labs", // Usually your GitHub org/user name. + projectName: "solana", // Usually your repo name. 
+ themeConfig: { + navbar: { + logo: { + alt: "Solana Logo", + src: "img/logo-horizontal.svg", + srcDark: "img/logo-horizontal-dark.svg", + }, + links: [ + { + to: "docs/", + activeBasePath: "docs", + label: "Docs", + position: "left", + }, + { + to: "docs/apps/README", + activeBasePath: "docs2", + label: "Developers", + position: "left", + }, + { + to: "docs/running-validator/README", + activeBasePath: "docs2", + label: "Validators", + position: "left", + }, + { + href: "https://discordapp.com/invite/pquxPsq", + label: "Chat", + position: "right", + }, + + { + href: "https://github.com/solana-labs/solana", + label: "GitHub", + position: "right", + }, + ], + }, + footer: { + style: "dark", + links: [ + { + title: "Docs", + items: [ + { + label: "Introduction", + to: "docs/introduction", + }, + { + label: "Tour de SOL", + to: "docs/tour-de-sol/README", + }, + ], + }, + { + title: "Community", + items: [ + { + label: "Discord", + href: "https://discordapp.com/invite/pquxPsq", + }, + { + label: "Twitter", + href: "https://twitter.com/solana", + }, + { + label: "Forums", + href: "https://forums.solana.com", + }, + ], + }, + { + title: "More", + items: [ + { + label: "GitHub", + href: "https://github.com/solana-labs/solana", + }, + ], + }, + ], + copyright: `Copyright © ${new Date().getFullYear()} Solana Foundation`, + }, + }, + presets: [ + [ + "@docusaurus/preset-classic", + { + docs: { + path: "src", + // It is recommended to set document id as docs home page (`docs/` path). + homePageId: "introduction", + sidebarPath: require.resolve("./sidebars.js"), + // Please change this to your repo. + editUrl: "https://github.com/solana-labs/solana/edit/master/docs/", + }, + theme: { + customCss: require.resolve("./src/css/custom.css"), + }, + }, + ], + ], +}; diff --git a/docs/makefile b/docs/makefile deleted file mode 100644 index 5c223ef5e3..0000000000 --- a/docs/makefile +++ /dev/null @@ -1,51 +0,0 @@ -BOB_SRCS=$(wildcard art/*.bob) -MSC_SRCS=$(wildcard art/*.msc) -MD_SRCS=$(wildcard src/*.md src/*/*.md) src/cli/usage.md - -SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg) - -TARGET=html/index.html -TEST_STAMP=src/tests.ok - -all: $(TARGET) - -svg: $(SVG_IMGS) - -test: $(TEST_STAMP) - -open: $(TEST_STAMP) - mdbook build --open - ./set-solana-release-tag.sh - -watch: $(SVG_IMGS) - mdbook watch - -src/.gitbook/assets/%.svg: art/%.bob - @mkdir -p $(@D) - svgbob < $< > $@ - -src/.gitbook/assets/%.svg: art/%.msc - @mkdir -p $(@D) - mscgen -T svg -i $< -o $@ - -../target/debug/solana: - cd ../cli && cargo build - -src/cli/usage.md: build-cli-usage.sh ../target/debug/solana - ./$< - -src/%.md: %.md - @mkdir -p $(@D) - @cp $< $@ - -$(TEST_STAMP): $(TARGET) - mdbook test - touch $@ - -$(TARGET): $(SVG_IMGS) $(MD_SRCS) - mdbook build - ./set-solana-release-tag.sh - -clean: - rm -f $(SVG_IMGS) src/tests.ok - rm -rf html diff --git a/docs/package.json b/docs/package.json new file mode 100644 index 0000000000..9965552971 --- /dev/null +++ b/docs/package.json @@ -0,0 +1,39 @@ +{ + "name": "solana-docs", + "version": "0.0.0", + "private": true, + "scripts": { + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "format": "prettier --check \"**/*.{js,jsx,json,md,scss}\"", + "format:fix": "prettier --write \"**/*.{js,jsx,json,md,scss}\"", + "lint": "set -ex; eslint .", + "lint:fix": "npm run lint -- --fix" + }, + "dependencies": { + "@docusaurus/core": "^2.0.0-alpha.58", + 
"@docusaurus/preset-classic": "^2.0.0-alpha.58", + "@docusaurus/theme-search-algolia": "^2.0.0-alpha.32", + "babel-eslint": "^10.1.0", + "clsx": "^1.1.1", + "eslint": "^7.3.1", + "eslint-plugin-react": "^7.20.0", + "prettier": "^2.0.5", + "react": "^16.8.4", + "react-dom": "^16.8.4" + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + } +} diff --git a/docs/set-solana-release-tag.sh b/docs/set-solana-release-tag.sh index c3b3499c5f..bafdd031d0 100755 --- a/docs/set-solana-release-tag.sh +++ b/docs/set-solana-release-tag.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash - set -e + cd "$(dirname "$0")" if [[ -n $CI_TAG ]]; then @@ -23,7 +23,6 @@ if [[ -z "$LATEST_SOLANA_RELEASE_VERSION" ]]; then fi set -x -find html/ -name \*.html -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \; if [[ -n $CI ]]; then find src/ -name \*.md -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \; fi diff --git a/docs/set-vercel-project-name.sh b/docs/set-vercel-project-name.sh new file mode 100755 index 0000000000..c090ae225e --- /dev/null +++ b/docs/set-vercel-project-name.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Replaces the PROJECT_NAME value in vercel.json commit based on channel or tag +# so we push the updated docs to the right domain + +set -e + +if [[ -n $CI_TAG ]]; then + NAME=docs-solana-com +else + eval "$(../ci/channel-info.sh)" + case $CHANNEL in + edge) + NAME=edge-docs-solana-com + ;; + beta) + NAME=beta-docs-solana-com + ;; + *) + NAME=docs + ;; + esac +fi + +sed -i s/PROJECT_NAME/$NAME/g vercel.json diff --git a/docs/sidebars.js b/docs/sidebars.js new file mode 100644 index 0000000000..6c72ff0cef --- /dev/null +++ b/docs/sidebars.js @@ -0,0 +1,173 @@ +module.exports = { + docs: { + Introduction: ["introduction"], + "Wallet Guide": [ + "wallet-guide/README", + { + type: "category", + label: "App Wallets", + items: [ + "wallet-guide/apps", + "wallet-guide/trust-wallet", + "wallet-guide/ledger-live", + ], + }, + { + type: "category", + label: "Command-line Wallets", + items: [ + "wallet-guide/cli", + { + type: "category", + label: "Paper Wallets", + items: ["paper-wallet/README", "paper-wallet/paper-wallet-usage"], + }, + { + type: "category", + label: "Hardware Wallets", + items: ["hardware-wallets/README", "hardware-wallets/ledger"], + }, + "file-system-wallet/README", + ], + }, + "wallet-guide/support", + ], + "Command Line Guide": [ + "cli/README", + "cli/install-solana-cli-tools", + "cli/conventions", + "cli/choose-a-cluster", + "cli/transfer-tokens", + "cli/manage-stake-accounts", + "offline-signing/README", + "offline-signing/durable-nonce", + ], + "Solana Clusters": ["clusters"], + "Develop Applications": [ + "apps/README", + "apps/rent", + "apps/webwallet", + "apps/tictactoe", + "apps/drones", + "transaction", + "apps/jsonrpc-api", + "apps/javascript-api", + "apps/builtins/README", + ], + "Integration Guides": ["integrations/exchange"], + "Run a Validator": [ + "running-validator/README", + "running-validator/validator-reqs", + "running-validator/validator-start", + "running-validator/validator-stake", + "running-validator/validator-monitor", + "running-validator/validator-info", + "running-validator/validator-troubleshoot", + ], + "Tour de SOL": [ + "tour-de-sol/README", + "tour-de-sol/useful-links", + { + type: "category", + label: "Registration", + items: [ + 
"tour-de-sol/registration/how-to-register", + "tour-de-sol/registration/terms-of-participation", + "tour-de-sol/registration/rewards", + "tour-de-sol/registration/confidentiality", + "tour-de-sol/registration/validator-registration-and-rewards-faq", + ], + }, + { + type: "category", + label: "Participation", + items: [ + "tour-de-sol/participation/validator-technical-requirements", + "tour-de-sol/participation/validator-public-key-registration", + "tour-de-sol/participation/steps-to-create-a-validator", + ], + }, + "tour-de-sol/submitting-bugs", + ], + "Benchmark a Cluster": ["cluster/bench-tps", "cluster/performance-metrics"], + "Solana's Architecture": [ + "cluster/README", + "cluster/synchronization", + "cluster/leader-rotation", + "cluster/fork-generation", + "cluster/managing-forks", + "cluster/turbine-block-propagation", + "cluster/vote-signing", + "cluster/stake-delegation-and-rewards", + ], + "Anatomy of a Validator": [ + "validator/README", + "validator/tpu", + "validator/tvu", + "validator/blockstore", + "validator/gossip", + "validator/runtime", + ], + Terminology: ["terminology"], + History: ["history"], + "Implemented Design Proposals": [ + { + type: "category", + label: "Economic Design", + items: [ + "implemented-proposals/ed_overview/README", + { + type: "category", + label: "Validation Client Economics", + items: [ + "implemented-proposals/ed_overview/ed_validation_client_economics/README", + "implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards", + "implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees", + "implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation", + ], + }, + "implemented-proposals/ed_overview/ed_storage_rent_economics", + "implemented-proposals/ed_overview/ed_economic_sustainability", + "implemented-proposals/ed_overview/ed_mvp", + "implemented-proposals/ed_overview/ed_references", + ], + }, + "implemented-proposals/transaction-fees", + "implemented-proposals/tower-bft", + "implemented-proposals/leader-leader-transition", + "implemented-proposals/leader-validator-transition", + "implemented-proposals/persistent-account-storage", + "implemented-proposals/reliable-vote-transmission", + "implemented-proposals/repair-service", + "implemented-proposals/testing-programs", + "implemented-proposals/readonly-accounts", + "implemented-proposals/embedding-move", + "implemented-proposals/staking-rewards", + "implemented-proposals/rent", + "implemented-proposals/durable-tx-nonces", + "implemented-proposals/validator-timestamp-oracle", + "implemented-proposals/commitment", + "implemented-proposals/snapshot-verification", + "implemented-proposals/cross-program-invocation", + "implemented-proposals/program-derived-addresses", + "implemented-proposals/abi-management", + ], + "Accepted Design Proposals": [ + "proposals/README", + "proposals/ledger-replication-to-implement", + "proposals/optimistic-confirmation-and-slashing", + "proposals/vote-signing-to-implement", + "proposals/cluster-test-framework", + "proposals/validator-proposal", + "proposals/simple-payment-and-state-verification", + "proposals/interchain-transaction-verification", + "proposals/snapshot-verification", + "proposals/bankless-leader", + "proposals/slashing", + "proposals/tick-verification", + "proposals/block-confirmation", + "proposals/rust-clients", + "proposals/optimistic_confirmation", + ], + }, +}; diff --git 
a/docs/src/.gitbook/assets/economic_design_infl_230719.png b/docs/src/.gitbook/assets/economic_design_infl_230719.png deleted file mode 100644 index 2ce0958cc4..0000000000 Binary files a/docs/src/.gitbook/assets/economic_design_infl_230719.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/ledger-live-enable-developer-mode.png b/docs/src/.gitbook/assets/ledger-live-enable-developer-mode.png deleted file mode 100644 index 296fab7d21..0000000000 Binary files a/docs/src/.gitbook/assets/ledger-live-enable-developer-mode.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/ledger-live-install-solana-app.png b/docs/src/.gitbook/assets/ledger-live-install-solana-app.png deleted file mode 100644 index 46986a69e7..0000000000 Binary files a/docs/src/.gitbook/assets/ledger-live-install-solana-app.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/ledger-live-latest-version-installed.png b/docs/src/.gitbook/assets/ledger-live-latest-version-installed.png deleted file mode 100644 index a2d94caa9a..0000000000 Binary files a/docs/src/.gitbook/assets/ledger-live-latest-version-installed.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/ledger-live-update-available-v0.2.2.png b/docs/src/.gitbook/assets/ledger-live-update-available-v0.2.2.png deleted file mode 100644 index 5756dff878..0000000000 Binary files a/docs/src/.gitbook/assets/ledger-live-update-available-v0.2.2.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/p_ex_interest.png b/docs/src/.gitbook/assets/p_ex_interest.png deleted file mode 100755 index 3329fe04e5..0000000000 Binary files a/docs/src/.gitbook/assets/p_ex_interest.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/p_ex_schedule.png b/docs/src/.gitbook/assets/p_ex_schedule.png deleted file mode 100644 index beb4c0cc5c..0000000000 Binary files a/docs/src/.gitbook/assets/p_ex_schedule.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/p_ex_supply.png b/docs/src/.gitbook/assets/p_ex_supply.png deleted file mode 100644 index 3779849368..0000000000 Binary files a/docs/src/.gitbook/assets/p_ex_supply.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/porep_reward.png b/docs/src/.gitbook/assets/porep_reward.png deleted file mode 100644 index 501cf29e1d..0000000000 Binary files a/docs/src/.gitbook/assets/porep_reward.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/ramp-tps-rounds.png b/docs/src/.gitbook/assets/ramp-tps-rounds.png deleted file mode 100644 index 6da4b959ed..0000000000 Binary files a/docs/src/.gitbook/assets/ramp-tps-rounds.png and /dev/null differ diff --git a/docs/src/.gitbook/assets/solana-tour-de-sol-participation-terms-20190723.pdf b/docs/src/.gitbook/assets/solana-tour-de-sol-participation-terms-20190723.pdf deleted file mode 100644 index 9e75dc30e2..0000000000 Binary files a/docs/src/.gitbook/assets/solana-tour-de-sol-participation-terms-20190723.pdf and /dev/null differ diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 2dd5a4b987..3a81b2ff08 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -1,4 +1,6 @@ -# Table of contents +--- +title: Table of contents +--- * [Introduction](introduction.md) * [Wallet Guide](wallet-guide/README.md) diff --git a/docs/src/apps/README.md b/docs/src/apps/README.md index a85e214fb0..977ee076da 100644 --- a/docs/src/apps/README.md +++ b/docs/src/apps/README.md @@ -1,4 +1,6 @@ -# Programming Model +--- +title: Programming Model +--- An _app_ interacts with a Solana cluster by sending it _transactions_ with one or 
more _instructions_. The Solana _runtime_ passes those instructions to _programs_ deployed by app developers beforehand. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transferred. Instructions are executed sequentially and atomically for each transaction. If any instruction is invalid, all account changes in the transaction are discarded. @@ -18,7 +20,7 @@ Each instruction specifies a single program account \(which must be marked execu ## Deploying Programs to a Cluster -![SDK tools](../.gitbook/assets/sdk-tools.svg) +![SDK tools](/img/sdk-tools.svg) As shown in the diagram above, a program author creates a program and compiles it to an ELF shared object containing BPF bytecode and uploads it to the Solana cluster with a special _deploy_ transaction. The cluster makes it available to clients via a _program ID_. The program ID is a _address_ specified when deploying and is used to reference the program in subsequent transactions. diff --git a/docs/src/apps/builtins/README.md b/docs/src/apps/builtins/README.md index 1a919b2ad9..d060818755 100644 --- a/docs/src/apps/builtins/README.md +++ b/docs/src/apps/builtins/README.md @@ -1,4 +1,6 @@ -# Builtin Programs +--- +title: Builtin Programs +--- Solana contains a small handful of builtin programs, which are required to run validator nodes. Unlike third-party programs, the builtin programs are part of @@ -18,15 +20,15 @@ programs, as well include instructions from third-party programs. Create accounts and transfer lamports between them -* Program ID: `11111111111111111111111111111111` -* Instructions: [SystemInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/system_instruction/enum.SystemInstruction.html) +- Program ID: `11111111111111111111111111111111` +- Instructions: [SystemInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/system_instruction/enum.SystemInstruction.html) ## Config Program Add configuration data to the chain and the list of public keys that are permitted to modify it -* Program ID: `Config1111111111111111111111111111111111111` -* Instructions: [config_instruction](https://docs.rs/solana-config-program/LATEST_SOLANA_RELEASE_VERSION/solana_config_program/config_instruction/index.html) +- Program ID: `Config1111111111111111111111111111111111111` +- Instructions: [config_instruction](https://docs.rs/solana-config-program/LATEST_SOLANA_RELEASE_VERSION/solana_config_program/config_instruction/index.html) Unlike the other programs, the Config program does not define any individual instructions. It has just one implicit instruction, a "store" instruction. Its @@ -37,25 +39,25 @@ data to store in it. 
Create stake accounts and delegate it to validators -* Program ID: `Stake11111111111111111111111111111111111111` -* Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/LATEST_SOLANA_RELEASE_VERSION/solana_stake_program/stake_instruction/enum.StakeInstruction.html) +- Program ID: `Stake11111111111111111111111111111111111111` +- Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/LATEST_SOLANA_RELEASE_VERSION/solana_stake_program/stake_instruction/enum.StakeInstruction.html) ## Vote Program Create vote accounts and vote on blocks -* Program ID: `Vote111111111111111111111111111111111111111` -* Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/LATEST_SOLANA_RELEASE_VERSION/solana_vote_program/vote_instruction/enum.VoteInstruction.html) +- Program ID: `Vote111111111111111111111111111111111111111` +- Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/LATEST_SOLANA_RELEASE_VERSION/solana_vote_program/vote_instruction/enum.VoteInstruction.html) ## BPF Loader Add programs to the chain. -* Program ID: `BPFLoader1111111111111111111111111111111111` -* Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/loader_instruction/enum.LoaderInstruction.html) +- Program ID: `BPFLoader1111111111111111111111111111111111` +- Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/loader_instruction/enum.LoaderInstruction.html) The BPF Loader marks itself as its "owner" of the executable account it creates to store your program. When a user invokes an instruction via a program ID, the Solana runtime will load both your executable account and its owner, the BPF Loader. The runtime then passes your program to the BPF Loader -to process the instruction. \ No newline at end of file +to process the instruction. diff --git a/docs/src/apps/drones.md b/docs/src/apps/drones.md index 05925938da..9a8436ca18 100644 --- a/docs/src/apps/drones.md +++ b/docs/src/apps/drones.md @@ -1,4 +1,6 @@ -# Drones +--- +title: Drones +--- This section defines an off-chain service called a _drone_, which acts as custodian of a user's private key. In its simplest form, it can be used to create _airdrop_ transactions, a token transfer from the drone's account to a client's account. @@ -20,7 +22,7 @@ Note: the Solana cluster will not parallelize transactions funded by the same fe ## Attack vectors -### Invalid recent\_blockhash +### Invalid recent_blockhash The drone may prefer its airdrops only target a particular Solana cluster. To do that, it listens to the cluster for new entry IDs and ensure any requests reference a recent one. @@ -41,4 +43,3 @@ A client may request multiple airdrops before the first has been submitted to th If the transaction data size is smaller than the size of the returned signature \(or descriptive error\), a single client can flood the network. Considering that a simple `Transfer` operation requires two public keys \(each 32 bytes\) and a `fee` field, and that the returned signature is 64 bytes \(and a byte to indicate `Ok`\), consideration for this attack may not be required. In the current design, the drone accepts TCP connections. This allows clients to DoS the service by simply opening lots of idle connections. Switching to UDP may be preferred. The transaction data will be smaller than a UDP packet since the transaction sent to the Solana cluster is already pinned to using UDP. 
- diff --git a/docs/src/apps/javascript-api.md b/docs/src/apps/javascript-api.md index 5f891d0fc0..b90f0670d9 100644 --- a/docs/src/apps/javascript-api.md +++ b/docs/src/apps/javascript-api.md @@ -1,4 +1,5 @@ -# JavaScript API +--- +title: JavaScript API +--- See [solana-web3](https://solana-labs.github.io/solana-web3.js/). - diff --git a/docs/src/apps/jsonrpc-api.md b/docs/src/apps/jsonrpc-api.md index 7551979a26..7510a04caa 100644 --- a/docs/src/apps/jsonrpc-api.md +++ b/docs/src/apps/jsonrpc-api.md @@ -1,4 +1,6 @@ -# JSON RPC API +--- +title: JSON RPC API +--- Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification. @@ -14,62 +16,62 @@ To interact with a Solana node inside a JavaScript application, use the [solana- ## Methods -* [getAccountInfo](jsonrpc-api.md#getaccountinfo) -* [getBalance](jsonrpc-api.md#getbalance) -* [getBlockCommitment](jsonrpc-api.md#getblockcommitment) -* [getBlockTime](jsonrpc-api.md#getblocktime) -* [getClusterNodes](jsonrpc-api.md#getclusternodes) -* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock) -* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks) -* [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress) -* [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction) -* [getEpochInfo](jsonrpc-api.md#getepochinfo) -* [getEpochSchedule](jsonrpc-api.md#getepochschedule) -* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash) -* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor) -* [getFees](jsonrpc-api.md#getfees) -* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock) -* [getGenesisHash](jsonrpc-api.md#getgenesishash) -* [getIdentity](jsonrpc-api.md#getidentity) -* [getInflationGovernor](jsonrpc-api.md#getinflationgovernor) -* [getInflationRate](jsonrpc-api.md#getinflationrate) -* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts) -* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule) -* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption) -* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts) -* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash) -* [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses) -* [getSlot](jsonrpc-api.md#getslot) -* [getSlotLeader](jsonrpc-api.md#getslotleader) -* [getStakeActivation](jsonrpc-api.md#getstakeactivation) -* [getSupply](jsonrpc-api.md#getsupply) -* [getTransactionCount](jsonrpc-api.md#gettransactioncount) -* [getVersion](jsonrpc-api.md#getversion) -* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts) -* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot) -* [requestAirdrop](jsonrpc-api.md#requestairdrop) -* [sendTransaction](jsonrpc-api.md#sendtransaction) -* [simulateTransaction](jsonrpc-api.md#simulatetransaction) -* [setLogFilter](jsonrpc-api.md#setlogfilter) -* [validatorExit](jsonrpc-api.md#validatorexit) -* [Subscription Websocket](jsonrpc-api.md#subscription-websocket) - * [accountSubscribe](jsonrpc-api.md#accountsubscribe) - * [accountUnsubscribe](jsonrpc-api.md#accountunsubscribe) - * [programSubscribe](jsonrpc-api.md#programsubscribe) - * [programUnsubscribe](jsonrpc-api.md#programunsubscribe) - * [signatureSubscribe](jsonrpc-api.md#signaturesubscribe) - * [signatureUnsubscribe](jsonrpc-api.md#signatureunsubscribe) - * [slotSubscribe](jsonrpc-api.md#slotsubscribe) - * [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe) +- [getAccountInfo](jsonrpc-api.md#getaccountinfo) +- [getBalance](jsonrpc-api.md#getbalance) +- 
[getBlockCommitment](jsonrpc-api.md#getblockcommitment) +- [getBlockTime](jsonrpc-api.md#getblocktime) +- [getClusterNodes](jsonrpc-api.md#getclusternodes) +- [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock) +- [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks) +- [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress) +- [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction) +- [getEpochInfo](jsonrpc-api.md#getepochinfo) +- [getEpochSchedule](jsonrpc-api.md#getepochschedule) +- [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash) +- [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor) +- [getFees](jsonrpc-api.md#getfees) +- [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock) +- [getGenesisHash](jsonrpc-api.md#getgenesishash) +- [getIdentity](jsonrpc-api.md#getidentity) +- [getInflationGovernor](jsonrpc-api.md#getinflationgovernor) +- [getInflationRate](jsonrpc-api.md#getinflationrate) +- [getLargestAccounts](jsonrpc-api.md#getlargestaccounts) +- [getLeaderSchedule](jsonrpc-api.md#getleaderschedule) +- [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption) +- [getProgramAccounts](jsonrpc-api.md#getprogramaccounts) +- [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash) +- [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses) +- [getSlot](jsonrpc-api.md#getslot) +- [getSlotLeader](jsonrpc-api.md#getslotleader) +- [getStakeActivation](jsonrpc-api.md#getstakeactivation) +- [getSupply](jsonrpc-api.md#getsupply) +- [getTransactionCount](jsonrpc-api.md#gettransactioncount) +- [getVersion](jsonrpc-api.md#getversion) +- [getVoteAccounts](jsonrpc-api.md#getvoteaccounts) +- [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot) +- [requestAirdrop](jsonrpc-api.md#requestairdrop) +- [sendTransaction](jsonrpc-api.md#sendtransaction) +- [simulateTransaction](jsonrpc-api.md#simulatetransaction) +- [setLogFilter](jsonrpc-api.md#setlogfilter) +- [validatorExit](jsonrpc-api.md#validatorexit) +- [Subscription Websocket](jsonrpc-api.md#subscription-websocket) + - [accountSubscribe](jsonrpc-api.md#accountsubscribe) + - [accountUnsubscribe](jsonrpc-api.md#accountunsubscribe) + - [programSubscribe](jsonrpc-api.md#programsubscribe) + - [programUnsubscribe](jsonrpc-api.md#programunsubscribe) + - [signatureSubscribe](jsonrpc-api.md#signaturesubscribe) + - [signatureUnsubscribe](jsonrpc-api.md#signatureunsubscribe) + - [slotSubscribe](jsonrpc-api.md#slotsubscribe) + - [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe) ## Request Formatting To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. 
The JSON request data should contain 4 fields: -* `jsonrpc: `, set to `"2.0"` -* `id: `, a unique client-generated identifying integer -* `method: `, a string containing the method to be invoked -* `params: `, a JSON array of ordered parameter values +- `jsonrpc: `, set to `"2.0"` +- `id: `, a unique client-generated identifying integer +- `method: `, a string containing the method to be invoked +- `params: `, a JSON array of ordered parameter values Example using curl: @@ -79,27 +81,28 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, " The response output will be a JSON object with the following fields: -* `jsonrpc: `, matching the request specification -* `id: `, matching the request identifier -* `result: `, requested data or success confirmation +- `jsonrpc: `, matching the request specification +- `id: `, matching the request identifier +- `result: `, requested data or success confirmation Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST. ## Definitions -* Hash: A SHA-256 hash of a chunk of data. -* Pubkey: The public key of a Ed25519 key-pair. -* Signature: An Ed25519 signature of a chunk of data. -* Transaction: A Solana instruction signed by a client key-pair. +- Hash: A SHA-256 hash of a chunk of data. +- Pubkey: The public key of a Ed25519 key-pair. +- Signature: An Ed25519 signature of a chunk of data. +- Transaction: A Solana instruction signed by a client key-pair. ## Configuring State Commitment Solana nodes choose which bank state to query based on a commitment requirement set by the client. Clients may specify either: -* `{"commitment":"max"}` - the node will query the most recent bank confirmed by the cluster as having reached `MAX_LOCKOUT_HISTORY` confirmations -* `{"commitment":"root"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations on this node -* `{"commitment":"single"}` - the node will query the most recent bank having reached 1 confirmation -* `{"commitment":"recent"}` - the node will query its most recent bank + +- `{"commitment":"max"}` - the node will query the most recent bank confirmed by the cluster as having reached `MAX_LOCKOUT_HISTORY` confirmations +- `{"commitment":"root"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations on this node +- `{"commitment":"single"}` - the node will query the most recent bank having reached 1 confirmation +- `{"commitment":"recent"}` - the node will query its most recent bank The commitment parameter should be included as the last element in the `params` array: @@ -108,22 +111,25 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, " ``` #### Default: + If commitment configuration is not provided, the node will default to `"commitment":"max"` Only methods that query bank state accept the commitment parameter. They are indicated in the API Reference below. #### RpcResponse Structure + Many methods that take a commitment parameter return an RpcResponse JSON object comprised of two parts: -* `context` : An RpcResponseContext JSON structure including a `slot` field at which the operation was evaluated. -* `value` : The value returned by the operation itself. - +- `context` : An RpcResponseContext JSON structure including a `slot` field at which the operation was evaluated. +- `value` : The value returned by the operation itself. 
## Health Check + Although not a JSON RPC API, a `GET /health` at the RPC HTTP Endpoint provides a health-check mechanism for use by load balancers or other network -infrastructure. This request will always return a HTTP 200 OK response with a body of +infrastructure. This request will always return a HTTP 200 OK response with a body of "ok" or "behind" based on the following conditions: + 1. If one or more `--trusted-validator` arguments are provided to `solana-validator`, "ok" is returned when the node has within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest trusted validator, otherwise "behind" is returned. @@ -137,23 +143,23 @@ Returns all information associated with the account of provided Pubkey #### Parameters: -* `` - Pubkey of account to query, as base-58 encoded string -* `` - (optional) Configuration object containing the following optional fields: - * (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) - * (optional) `encoding: ` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary. +- `` - Pubkey of account to query, as base-58 encoded string +- `` - (optional) Configuration object containing the following optional fields: + - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) + - (optional) `encoding: ` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary. Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type ``. #### Results: The result will be an RpcResponse JSON object with `value` equal to: -* `` - if the requested account doesn't exist -* `` - otherwise, a JSON object containing: - * `lamports: `, number of lamports assigned to this account, as a u64 - * `owner: `, base-58 encoded Pubkey of the program this account has been assigned to - * `data: `, data associated with the account, either as base-58 encoded binary data or JSON format `{: }`, depending on encoding parameter - * `executable: `, boolean indicating if the account contains a program \(and is strictly read-only\) - * `rentEpoch`: , the epoch at which this account will next owe rent, as u64 +- `` - if the requested account doesn't exist +- `` - otherwise, a JSON object containing: + - `lamports: `, number of lamports assigned to this account, as a u64 + - `owner: `, base-58 encoded Pubkey of the program this account has been assigned to + - `data: `, data associated with the account, either as base-58 encoded binary data or JSON format `{: }`, depending on encoding parameter + - `executable: `, boolean indicating if the account contains a program \(and is strictly read-only\) + - `rentEpoch: `, the epoch at which this account will next owe rent, as u64 #### Example: @@ -177,12 +183,12 @@ Returns the balance of the account of provided Pubkey #### Parameters: -* `` - Pubkey of account to query, as base-58 encoded string -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - Pubkey of account to query, as base-58 encoded string +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `RpcResponse` - RpcResponse JSON object with `value` field set to the balance +- `RpcResponse` - RpcResponse JSON object with `value` field set to the balance #### Example: @@ -200,16 
+206,16 @@ Returns commitment for particular block #### Parameters: -* `` - block, identified by Slot +- `` - block, identified by Slot #### Results: The result field will be a JSON object containing: -* `commitment` - commitment, comprising either: - * `` - Unknown block - * `` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY` + 1 -* `totalStake` - total active stake, in lamports, of the current epoch +- `commitment` - commitment, comprising either: + - `` - Unknown block + - `` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY` + 1 +- `totalStake` - total active stake, in lamports, of the current epoch #### Example: @@ -237,12 +243,12 @@ query a node that is built from genesis and retains the entire ledger. #### Parameters: -* `` - block, identified by Slot +- `` - block, identified by Slot #### Results: -* `` - block has not yet been produced -* `` - estimated production time, as Unix timestamp (seconds since the Unix epoch) +- `` - block has not yet been produced +- `` - estimated production time, as Unix timestamp (seconds since the Unix epoch) #### Example: @@ -266,11 +272,11 @@ None The result field will be an array of JSON objects, each with the following sub fields: -* `pubkey: ` - Node public key, as base-58 encoded string -* `gossip: ` - Gossip network address for the node -* `tpu: ` - TPU network address for the node -* `rpc: |null` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled -* `version: |null` - The software version of the node, or `null` if the version information is not available +- `pubkey: ` - Node public key, as base-58 encoded string +- `gossip: ` - Gossip network address for the node +- `tpu: ` - TPU network address for the node +- `rpc: |null` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled +- `version: |null` - The software version of the node, or `null` if the version information is not available #### Example: @@ -288,33 +294,33 @@ Returns identity and transaction information about a confirmed block in the ledg #### Parameters: -* `` - slot, as u64 integer -* `` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. +- `` - slot, as u64 integer +- `` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). 
#### Results: The result field will be an object with the following fields: -* `` - if specified block is not confirmed -* `` - if block is confirmed, an object with the following fields: - * `blockhash: ` - the blockhash of this block, as base-58 encoded string - * `previousBlockhash: ` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111" - * `parentSlot: ` - the slot index of this block's parent - * `transactions: ` - an array of JSON objects containing: - * `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter - * `meta: ` - transaction status metadata object, containing `null` or: - * `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) - * `fee: ` - fee this transaction was charged, as u64 integer - * `preBalances: ` - array of u64 account balances from before the transaction was processed - * `postBalances: ` - array of u64 account balances after the transaction was processed - * DEPRECATED: `status: ` - Transaction status - * `"Ok": ` - Transaction was successful - * `"Err": ` - Transaction failed with TransactionError - * `rewards: ` - an array of JSON objects containing: - * `pubkey: ` - The public key, as base-58 encoded string, of the account that received the reward - * `lamports: `- number of reward lamports credited or debited by the account, as a i64 - * `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available +- `` - if specified block is not confirmed +- `` - if block is confirmed, an object with the following fields: + - `blockhash: ` - the blockhash of this block, as base-58 encoded string + - `previousBlockhash: ` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111" + - `parentSlot: ` - the slot index of this block's parent + - `transactions: ` - an array of JSON objects containing: + - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter + - `meta: ` - transaction status metadata object, containing `null` or: + - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) + - `fee: ` - fee this transaction was charged, as u64 integer + - `preBalances: ` - array of u64 account balances from before the transaction was processed + - `postBalances: ` - array of u64 account balances after the transaction was processed + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError + - `rewards: ` - an array of JSON objects containing: + - `pubkey: ` - The public key, as base-58 encoded string, of the account that received the reward + - `lamports: `- number of reward lamports credited or debited by the account, as a i64 + - `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available #### Example: @@ -338,18 +344,18 @@ Transactions are quite different from those on other blockchains. 
Be sure to rev The JSON structure of a transaction is defined as follows: -* `signatures: ` - A list of base-58 encoded signatures applied to the transaction. The list is always of length `message.header.numRequiredSignatures`, and the signature at index `i` corresponds to the public key at index `i` in `message.account_keys`. -* `message: ` - Defines the content of the transaction. - * `accountKeys: ` - List of base-58 encoded public keys used by the transaction, including by the instructions and for signatures. The first `message.header.numRequiredSignatures` public keys must sign the transaction. - * `header: ` - Details the account types and signatures required by the transaction. - * `numRequiredSignatures: ` - The total number of signatures required to make the transaction valid. The signatures must match the first `numRequiredSignatures` of `message.account_keys`. - * `numReadonlySignedAccounts: ` - The last `numReadonlySignedAccounts` of the signed keys are read-only accounts. Programs may process multiple transactions that load read-only accounts within a single PoH entry, but are not permitted to credit or debit lamports or modify account data. Transactions targeting the same read-write account are evaluated sequentially. - * `numReadonlyUnsignedAccounts: ` - The last `numReadonlyUnsignedAccounts` of the unsigned keys are read-only accounts. - * `recentBlockhash: ` - A base-58 encoded hash of a recent block in the ledger used to prevent transaction duplication and to give transactions lifetimes. - * `instructions: ` - List of program instructions that will be executed in sequence and committed in one atomic transaction if all succeed. - * `programIdIndex: ` - Index into the `message.accountKeys` array indicating the program account that executes this instruction. - * `accounts: ` - List of ordered indices into the `message.accountKeys` array indicating which accounts to pass to the program. - * `data: ` - The program input data encoded in a base-58 string. +- `signatures: ` - A list of base-58 encoded signatures applied to the transaction. The list is always of length `message.header.numRequiredSignatures`, and the signature at index `i` corresponds to the public key at index `i` in `message.account_keys`. +- `message: ` - Defines the content of the transaction. + - `accountKeys: ` - List of base-58 encoded public keys used by the transaction, including by the instructions and for signatures. The first `message.header.numRequiredSignatures` public keys must sign the transaction. + - `header: ` - Details the account types and signatures required by the transaction. + - `numRequiredSignatures: ` - The total number of signatures required to make the transaction valid. The signatures must match the first `numRequiredSignatures` of `message.account_keys`. + - `numReadonlySignedAccounts: ` - The last `numReadonlySignedAccounts` of the signed keys are read-only accounts. Programs may process multiple transactions that load read-only accounts within a single PoH entry, but are not permitted to credit or debit lamports or modify account data. Transactions targeting the same read-write account are evaluated sequentially. + - `numReadonlyUnsignedAccounts: ` - The last `numReadonlyUnsignedAccounts` of the unsigned keys are read-only accounts. + - `recentBlockhash: ` - A base-58 encoded hash of a recent block in the ledger used to prevent transaction duplication and to give transactions lifetimes. 
+ - `instructions: ` - List of program instructions that will be executed in sequence and committed in one atomic transaction if all succeed. + - `programIdIndex: ` - Index into the `message.accountKeys` array indicating the program account that executes this instruction. + - `accounts: ` - List of ordered indices into the `message.accountKeys` array indicating which accounts to pass to the program. + - `data: ` - The program input data encoded in a base-58 string. ### getConfirmedBlocks @@ -357,8 +363,8 @@ Returns a list of confirmed blocks #### Parameters: -* `` - start_slot, as u64 integer -* `` - (optional) end_slot, as u64 integer +- `` - start_slot, as u64 integer +- `` - (optional) end_slot, as u64 integer #### Results: @@ -384,14 +390,15 @@ address, within a specified Slot range. Max range allowed is 10,000 Slots #### Parameters: -* `` - account address as base-58 encoded string -* `` - start slot, inclusive -* `` - end slot, inclusive +- `` - account address as base-58 encoded string +- `` - start slot, inclusive +- `` - end slot, inclusive #### Results: The result field will be an array of: -* `` - transaction signature as base-58 encoded string + +- `` - transaction signature as base-58 encoded string The signatures will be ordered based on the Slot in which they were confirmed in, from lowest to highest Slot @@ -411,24 +418,24 @@ Returns transaction details for a confirmed transaction #### Parameters: -* `` - transaction signature as base-58 encoded string -* `` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. - Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). +- `` - transaction signature as base-58 encoded string +- `` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. + Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). #### Results: -* `` - if transaction is not found or not confirmed -* `` - if transaction is confirmed, an object with the following fields: - * `slot: ` - the slot this transaction was processed in - * `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter - * `meta: ` - transaction status metadata object: - * `err: ` - Error if transaction failed, null if transaction succeeded. 
[TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) - * `fee: ` - fee this transaction was charged, as u64 integer - * `preBalances: ` - array of u64 account balances from before the transaction was processed - * `postBalances: ` - array of u64 account balances after the transaction was processed - * DEPRECATED: `status: ` - Transaction status - * `"Ok": ` - Transaction was successful - * `"Err": ` - Transaction failed with TransactionError +- `` - if transaction is not found or not confirmed +- `` - if transaction is confirmed, an object with the following fields: + - `slot: ` - the slot this transaction was processed in + - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter + - `meta: ` - transaction status metadata object: + - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) + - `fee: ` - fee this transaction was charged, as u64 integer + - `preBalances: ` - array of u64 account balances from before the transaction was processed + - `postBalances: ` - array of u64 account balances after the transaction was processed + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError #### Example: @@ -452,16 +459,16 @@ Returns information about the current epoch #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: The result field will be an object with the following fields: -* `absoluteSlot: `, the current slot -* `epoch: `, the current epoch -* `slotIndex: `, the current slot relative to the start of the current epoch -* `slotsInEpoch: `, the number of slots in this epoch +- `absoluteSlot: `, the current slot +- `epoch: `, the current epoch +- `slotIndex: `, the current slot relative to the start of the current epoch +- `slotsInEpoch: `, the number of slots in this epoch #### Example: @@ -485,11 +492,11 @@ None The result field will be an object with the following fields: -* `slotsPerEpoch: `, the maximum number of slots in each epoch -* `leaderScheduleSlotOffset: `, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch -* `warmup: `, whether epochs start short and grow -* `firstNormalEpoch: `, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH) -* `firstNormalSlot: `, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1) +- `slotsPerEpoch: `, the maximum number of slots in each epoch +- `leaderScheduleSlotOffset: `, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch +- `warmup: `, whether epochs start short and grow +- `firstNormalEpoch: `, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH) +- `firstNormalSlot: `, MINIMUM_SLOTS_PER_EPOCH \* (2.pow(firstNormalEpoch) - 1) #### Example: @@ -507,16 +514,16 @@ Returns the fee calculator associated with the query blockhash, or `null` if the #### Parameters: -* `` - query blockhash as a Base58 encoded string -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - query blockhash as a Base58 encoded string +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### 
Results: The result will be an RpcResponse JSON object with `value` equal to: -* `` - if the query blockhash has expired -* `` - otherwise, a JSON object containing: - * `feeCalculator: `, `FeeCalculator` object describing the cluster fee rate at the queried blockhash +- `` - if the query blockhash has expired +- `` - otherwise, a JSON object containing: + - `feeCalculator: `, `FeeCalculator` object describing the cluster fee rate at the queried blockhash #### Example: @@ -540,11 +547,11 @@ None The `result` field will be an `object` with the following fields: -* `burnPercent: `, Percentage of fees collected to be destroyed -* `maxLamportsPerSignature: `, Largest value `lamportsPerSignature` can attain for the next slot -* `minLamportsPerSignature: `, Smallest value `lamportsPerSignature` can attain for the next slot -* `targetLamportsPerSignature: `, Desired fee rate for the cluster -* `targetSignaturesPerSlot: `, Desired signature rate for the cluster +- `burnPercent: `, Percentage of fees collected to be destroyed +- `maxLamportsPerSignature: `, Largest value `lamportsPerSignature` can attain for the next slot +- `minLamportsPerSignature: `, Smallest value `lamportsPerSignature` can attain for the next slot +- `targetLamportsPerSignature: `, Desired fee rate for the cluster +- `targetSignaturesPerSlot: `, Desired signature rate for the cluster #### Example: @@ -564,15 +571,15 @@ which the blockhash will be valid. #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields: -* `blockhash: ` - a Hash as base-58 encoded string -* `feeCalculator: ` - FeeCalculator object, the fee schedule for this block hash -* `lastValidSlot: ` - last slot in which a blockhash will be valid +- `blockhash: ` - a Hash as base-58 encoded string +- `feeCalculator: ` - FeeCalculator object, the fee schedule for this block hash +- `lastValidSlot: ` - last slot in which a blockhash will be valid #### Example: @@ -594,7 +601,7 @@ None #### Results: -* `` - Slot +- `` - Slot #### Example: @@ -616,7 +623,7 @@ None #### Results: -* `` - a Hash as base-58 encoded string +- `` - a Hash as base-58 encoded string #### Example: @@ -640,7 +647,7 @@ None The result field will be a JSON object with the following fields: -* `identity`, the identity pubkey of the current node \(as a base-58 encoded string\) +- `identity`, the identity pubkey of the current node \(as a base-58 encoded string\) #### Example: @@ -657,17 +664,17 @@ Returns the current inflation governor #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: The result field will be a JSON object with the following fields: -* `initial: `, the initial inflation percentage from time 0 -* `terminal: `, terminal inflation percentage -* `taper: `, rate per year at which inflation is lowered -* `foundation: `, percentage of total inflation allocated to the foundation -* `foundationTerm: `, duration of foundation pool inflation in years +- `initial: `, the initial inflation percentage from time 0 +- `terminal: `, terminal inflation percentage +- `taper: `, rate per year at which inflation is lowered +- `foundation: `, percentage of total inflation allocated to the foundation +- `foundationTerm: `, duration of 
foundation pool inflation in years #### Example: @@ -691,10 +698,10 @@ None The result field will be a JSON object with the following fields: -* `total: `, total inflation -* `validator: `, inflation allocated to validators -* `foundation: `, inflation allocated to the foundation -* `epoch: `, epoch for which these values are valid +- `total: `, total inflation +- `validator: `, inflation allocated to validators +- `foundation: `, inflation allocated to the foundation +- `epoch: `, epoch for which these values are valid #### Example: @@ -712,17 +719,17 @@ Returns the 20 largest accounts, by lamport balance #### Parameters: -* `` - (optional) Configuration object containing the following optional fields: - * (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) - * (optional) `filter: ` - filter results by account type; currently supported: `circulating|nonCirculating` +- `` - (optional) Configuration object containing the following optional fields: + - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) + - (optional) `filter: ` - filter results by account type; currently supported: `circulating|nonCirculating` #### Results: The result will be an RpcResponse JSON object with `value` equal to an array of: -* `` - otherwise, a JSON object containing: - * `address: `, base-58 encoded address of the account - * `lamports: `, number of lamports in the account, as a u64 +- `` - otherwise, a JSON object containing: + - `address: `, base-58 encoded address of the account + - `lamports: `, number of lamports in the account, as a u64 #### Example: @@ -740,13 +747,13 @@ Returns the leader schedule for an epoch #### Parameters: -* `` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - if requested epoch is not found -* `` - otherwise, the result field will be a dictionary of leader public keys +- `` - if requested epoch is not found +- `` - otherwise, the result field will be a dictionary of leader public keys \(as base-58 encoded strings\) and their corresponding leader slot indices as values (indices are to the first slot in the requested epoch) @@ -766,12 +773,12 @@ Returns minimum balance required to make account rent exempt. #### Parameters: -* `` - account data length -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - account data length +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - minimum lamports required in account +- `` - minimum lamports required in account #### Example: @@ -789,31 +796,31 @@ Returns all accounts owned by the provided program Pubkey #### Parameters: -* `` - Pubkey of program, as base-58 encoded string -* `` - (optional) Configuration object containing the following optional fields: - * (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) - * (optional) `encoding: ` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary. 
+- `` - Pubkey of program, as base-58 encoded string +- `` - (optional) Configuration object containing the following optional fields: + - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) + - (optional) `encoding: ` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary. Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type ``. - * (optional) `filters: ` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results + - (optional) `filters: ` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results ##### Filters: -* `memcmp: ` - compares a provided series of bytes with program account data at a particular offset. Fields: - * `offset: ` - offset into program account data to start comparison - * `bytes: ` - data to match, as base-58 encoded string +- `memcmp: ` - compares a provided series of bytes with program account data at a particular offset. Fields: + - `offset: ` - offset into program account data to start comparison + - `bytes: ` - data to match, as base-58 encoded string -* `dataSize: ` - compares the program account data length with the provided data size +- `dataSize: ` - compares the program account data length with the provided data size #### Results: The result field will be an array of JSON objects, which will contain: -* `pubkey: ` - the account Pubkey as base-58 encoded string -* `account: ` - a JSON object, with the following sub fields: - * `lamports: `, number of lamports assigned to this account, as a u64 - * `owner: `, base-58 encoded Pubkey of the program this account has been assigned to +- `pubkey: ` - the account Pubkey as base-58 encoded string +- `account: ` - a JSON object, with the following sub fields: + - `lamports: `, number of lamports assigned to this account, as a u64 + - `owner: `, base-58 encoded Pubkey of the program this account has been assigned to `data: `, data associated with the account, either as base-58 encoded binary data or JSON format `{: }`, depending on encoding parameter - * `executable: `, boolean indicating if the account contains a program \(and is strictly read-only\) - * `rentEpoch`: , the epoch at which this account will next owe rent, as u64 + - `executable: `, boolean indicating if the account contains a program \(and is strictly read-only\) + - `rentEpoch: `, the epoch at which this account will next owe rent, as u64 #### Example: @@ -837,15 +844,15 @@ Returns a recent block hash from the ledger, and a fee schedule that can be used #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object. 
-* `RpcResponse` - RpcResponse JSON object with `value` field set to a JSON object including: -* `blockhash: ` - a Hash as base-58 encoded string -* `feeCalculator: ` - FeeCalculator object, the fee schedule for this block hash +- `RpcResponse` - RpcResponse JSON object with `value` field set to a JSON object including: +- `blockhash: ` - a Hash as base-58 encoded string +- `feeCalculator: ` - FeeCalculator object, the fee schedule for this block hash #### Example: @@ -866,26 +873,26 @@ active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots. #### Parameters: -* `` - An array of transaction signatures to confirm, as base-58 encoded strings -* `` - (optional) Configuration object containing the following field: - * `searchTransactionHistory: ` - if true, a Solana node will search its ledger cache for any signatures not found in the recent status cache +- `` - An array of transaction signatures to confirm, as base-58 encoded strings +- `` - (optional) Configuration object containing the following field: + - `searchTransactionHistory: ` - if true, a Solana node will search its ledger cache for any signatures not found in the recent status cache #### Results: An RpcResponse containing a JSON object consisting of an array of TransactionStatus objects. -* `RpcResponse` - RpcResponse JSON object with `value` field: +- `RpcResponse` - RpcResponse JSON object with `value` field: An array of: -* `` - Unknown transaction -* `` - * `slot: ` - The slot the transaction was processed - * `confirmations: ` - Number of blocks since signature confirmation, null if rooted, as well as finalized by a supermajority of the cluster - * `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) - * DEPRECATED: `status: ` - Transaction status - * `"Ok": ` - Transaction was successful - * `"Err": ` - Transaction failed with TransactionError +- `` - Unknown transaction +- `` + - `slot: ` - The slot the transaction was processed + - `confirmations: ` - Number of blocks since signature confirmation, null if rooted, as well as finalized by a supermajority of the cluster + - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError #### Example: @@ -909,11 +916,11 @@ Returns the current slot the node is processing #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - Current slot +- `` - Current slot #### Example: @@ -931,11 +938,11 @@ Returns the current slot leader #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - Node identity Pubkey as base-58 encoded string +- `` - Node identity Pubkey as base-58 encoded string #### Example: @@ -988,16 +995,16 @@ Returns information about the current supply. 
#### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: The result will be an RpcResponse JSON object with `value` equal to a JSON object containing: -* `total: ` - Total supply in lamports -* `circulating: ` - Circulating supply in lamports -* `nonCirculating: ` - Non-circulating supply in lamports -* `nonCirculatingAccounts: ` - an array of account addresses of non-circulating accounts, as strings +- `total: ` - Total supply in lamports +- `circulating: ` - Circulating supply in lamports +- `nonCirculating: ` - Non-circulating supply in lamports +- `nonCirculatingAccounts: ` - an array of account addresses of non-circulating accounts, as strings #### Example: @@ -1014,11 +1021,11 @@ Returns the current Transaction count from the ledger #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - count +- `` - count #### Example: @@ -1042,7 +1049,7 @@ None The result field will be a JSON object with the following fields: -* `solana-core`, software version of solana-core +- `solana-core`, software version of solana-core #### Example: @@ -1059,19 +1066,19 @@ Returns the account info and associated stake for all the voting accounts in the #### Parameters: -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: The result field will be a JSON object of `current` and `delinquent` accounts, each containing an array of JSON objects with the following sub fields: -* `votePubkey: ` - Vote account public key, as base-58 encoded string -* `nodePubkey: ` - Node public key, as base-58 encoded string -* `activatedStake: ` - the stake, in lamports, delegated to this vote account and active in this epoch -* `epochVoteAccount: ` - bool, whether the vote account is staked for this epoch -* `commission: `, percentage (0-100) of rewards payout owed to the vote account -* `lastVote: ` - Most recent slot voted on by this vote account -* `epochCredits: ` - History of how many credits earned by the end of each epoch, as an array of arrays containing: `[epoch, credits, previousCredits]` +- `votePubkey: ` - Vote account public key, as base-58 encoded string +- `nodePubkey: ` - Node public key, as base-58 encoded string +- `activatedStake: ` - the stake, in lamports, delegated to this vote account and active in this epoch +- `epochVoteAccount: ` - bool, whether the vote account is staked for this epoch +- `commission: `, percentage (0-100) of rewards payout owed to the vote account +- `lastVote: ` - Most recent slot voted on by this vote account +- `epochCredits: ` - History of how many credits earned by the end of each epoch, as an array of arrays containing: `[epoch, credits, previousCredits]` #### Example: @@ -1085,7 +1092,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m ### minimumLedgerSlot -Returns the lowest slot that the node has information about in its ledger. This +Returns the lowest slot that the node has information about in its ledger. 
This value may increase over time if the node is configured to purge older ledger data #### Parameters: @@ -1094,7 +1101,7 @@ None #### Results: -* `u64` - Minimum ledger slot +- `u64` - Minimum ledger slot #### Example: @@ -1112,13 +1119,13 @@ Requests an airdrop of lamports to a Pubkey #### Parameters: -* `` - Pubkey of account to receive lamports, as base-58 encoded string -* `` - lamports, as a u64 -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success) +- `` - Pubkey of account to receive lamports, as base-58 encoded string +- `` - lamports, as a u64 +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success) #### Results: -* `` - Transaction Signature of airdrop, as base-58 encoded string +- `` - Transaction Signature of airdrop, as base-58 encoded string #### Example: @@ -1135,21 +1142,21 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m Submits a signed transaction to the cluster for processing. Before submitting, the following preflight checks are performed: + 1. The transaction signatures are verified 2. The transaction is simulated against the latest max confirmed bank -and on failure an error will be returned. Preflight checks may be disabled if -desired. + and on failure an error will be returned. Preflight checks may be disabled if + desired. #### Parameters: -* `` - fully-signed Transaction, as base-58 encoded string -* `` - (optional) Configuration object containing the following field: - * `skipPreflight: ` - if true, skip the preflight transaction checks (default: false) - +- `` - fully-signed Transaction, as base-58 encoded string +- `` - (optional) Configuration object containing the following field: + - `skipPreflight: ` - if true, skip the preflight transaction checks (default: false) #### Results: -* `` - Transaction Signature, as base-58 encoded string +- `` - Transaction Signature, as base-58 encoded string #### Example: @@ -1167,17 +1174,17 @@ Simulate sending a transaction #### Parameters: -* `` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed. -* `` - (optional) Configuration object containing the following field: - * `sigVerify: ` - if true the transaction signatures will be verified (default: false) +- `` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed. +- `` - (optional) Configuration object containing the following field: + - `sigVerify: ` - if true the transaction signatures will be verified (default: false) #### Results: An RpcResponse containing a TransactionStatus object The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields: -* `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) -* `logs: ` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure) +- `err: ` - Error if transaction failed, null if transaction succeeded. 
[TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) +- `logs: ` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure) #### Example: @@ -1195,11 +1202,11 @@ Sets the log filter on the validator #### Parameters: -* `` - the new log filter to use +- `` - the new log filter to use #### Results: -* `` +- `` #### Example: @@ -1221,7 +1228,7 @@ None #### Results: -* `` - Whether the validator exit operation was successful +- `` - Whether the validator exit operation was successful #### Example: @@ -1237,9 +1244,9 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m After connecting to the RPC PubSub websocket at `ws://
/`: -* Submit subscription requests to the websocket using the methods below -* Multiple subscriptions may be active at once -* Many subscriptions take the optional [`commitment` parameter](jsonrpc-api.md#configuring-state-commitment), defining how finalized a change should be to trigger a notification. For subscriptions, if commitment is unspecified, the default value is `"single"`. +- Submit subscription requests to the websocket using the methods below +- Multiple subscriptions may be active at once +- Many subscriptions take the optional [`commitment` parameter](jsonrpc-api.md#configuring-state-commitment), defining how finalized a change should be to trigger a notification. For subscriptions, if commitment is unspecified, the default value is `"single"`. ### accountSubscribe @@ -1247,12 +1254,12 @@ Subscribe to an account to receive notifications when the lamports or data for a #### Parameters: -* `` - account Pubkey, as base-58 encoded string -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - account Pubkey, as base-58 encoded string +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - Subscription id \(needed to unsubscribe\) +- `` - Subscription id \(needed to unsubscribe\) #### Example: @@ -1296,11 +1303,11 @@ Unsubscribe from account change notifications #### Parameters: -* `` - id of account Subscription to cancel +- `` - id of account Subscription to cancel #### Results: -* `` - unsubscribe success message +- `` - unsubscribe success message #### Example: @@ -1318,12 +1325,12 @@ Subscribe to a program to receive notifications when the lamports or data for a #### Parameters: -* `` - program\_id Pubkey, as base-58 encoded string -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - program_id Pubkey, as base-58 encoded string +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) #### Results: -* `` - Subscription id \(needed to unsubscribe\) +- `` - Subscription id \(needed to unsubscribe\) #### Example: @@ -1370,11 +1377,11 @@ Unsubscribe from program-owned account change notifications #### Parameters: -* `` - id of account Subscription to cancel +- `` - id of account Subscription to cancel #### Results: -* `` - unsubscribe success message +- `` - unsubscribe success message #### Example: @@ -1392,14 +1399,14 @@ Subscribe to a transaction signature to receive notification when the transactio #### Parameters: -* `` - Transaction Signature, as base-58 encoded string -* `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) +- `` - Transaction Signature, as base-58 encoded string +- `` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\) #### Results: -* `integer` - subscription id \(needed to unsubscribe\) +- `integer` - subscription id \(needed to unsubscribe\) #### Example: @@ -1439,11 +1446,11 @@ Unsubscribe from signature confirmation notification #### Parameters: -* `` - subscription id to cancel +- `` - subscription id to cancel #### Results: -* `` - unsubscribe success message +- `` - unsubscribe success message #### Example: @@ -1465,7 +1472,7 @@ None #### Results: -* `integer` - subscription id \(needed to unsubscribe\) +- `integer` - subscription id \(needed to unsubscribe\) #### Example: @@ -1500,11 +1507,11 @@ Unsubscribe from slot notifications #### Parameters: -* `` - subscription id to cancel +- `` - subscription id to cancel 
#### Results: -* `` - unsubscribe success message +- `` - unsubscribe success message #### Example: @@ -1526,7 +1533,7 @@ None #### Results: -* `integer` - subscription id \(needed to unsubscribe\) +- `integer` - subscription id \(needed to unsubscribe\) #### Example: @@ -1559,11 +1566,11 @@ Unsubscribe from root notifications #### Parameters: -* `` - subscription id to cancel +- `` - subscription id to cancel #### Results: -* `` - unsubscribe success message +- `` - unsubscribe success message #### Example: @@ -1587,7 +1594,7 @@ None #### Results: -* `integer` - subscription id \(needed to unsubscribe\) +- `integer` - subscription id \(needed to unsubscribe\) #### Example: @@ -1624,11 +1631,11 @@ Unsubscribe from vote notifications #### Parameters: -* `` - subscription id to cancel +- `` - subscription id to cancel #### Results: -* `` - unsubscribe success message +- `` - unsubscribe success message #### Example: diff --git a/docs/src/apps/rent.md b/docs/src/apps/rent.md index e20af9b9df..0703d2466d 100644 --- a/docs/src/apps/rent.md +++ b/docs/src/apps/rent.md @@ -1,4 +1,6 @@ -# Storage Rent for Accounts +--- +title: Storage Rent for Accounts +--- Keeping accounts alive on Solana incurs a storage cost called _rent_ because the cluster must actively maintain the data to process any future transactions on it. This is different from Bitcoin and Ethereum, where storing accounts doesn't incur any costs. diff --git a/docs/src/apps/tictactoe.md b/docs/src/apps/tictactoe.md index dd2b4ea41f..fe603e9639 100644 --- a/docs/src/apps/tictactoe.md +++ b/docs/src/apps/tictactoe.md @@ -1,4 +1,6 @@ -# Example: Tic-Tac-Toe +--- +title: "Example: Tic-Tac-Toe" +--- [Click here to play Tic-Tac-Toe](https://solana-example-tictactoe.herokuapp.com/) on the Solana testnet. Open the link and wait for another player to join, or open the link in a second browser tab to play against yourself. You will see that every move a player makes stores a transaction on the ledger. @@ -19,4 +21,3 @@ Next, follow the steps in the git repository's [README](https://github.com/solan ## Getting lamports to users You may have noticed you interacted with the Solana cluster without first needing to acquire lamports to pay transaction fees. Under the hood, the web app creates a new ephemeral identity and sends a request to an off-chain service for a signed transaction authorizing a user to start a new game. The service is called a _drone_. When the app sends the signed transaction to the Solana cluster, the drone's lamports are spent to pay the transaction fee and start the game. In a real world app, the drone might request the user watch an ad or pass a CAPTCHA before signing over its lamports. - diff --git a/docs/src/apps/webwallet.md b/docs/src/apps/webwallet.md index 953a6dac3a..98962c347e 100644 --- a/docs/src/apps/webwallet.md +++ b/docs/src/apps/webwallet.md @@ -1,4 +1,6 @@ -# Example Client: Web Wallet +--- +title: "Example Client: Web Wallet" +--- ## Build and run a web wallet locally @@ -13,4 +15,3 @@ $ git checkout $TAG ``` Next, follow the steps in the git repository's [README](https://github.com/solana-labs/example-webwallet/blob/master/README.md). 
- diff --git a/docs/src/cli/README.md b/docs/src/cli/README.md index 8078cf97e6..c420b5d4a8 100644 --- a/docs/src/cli/README.md +++ b/docs/src/cli/README.md @@ -1,7 +1,9 @@ -# Command-line Guide +--- +title: Command-line Guide +--- In this section, we will describe how to use the Solana command-line tools to -create a *wallet*, to send and receive SOL tokens, and to participate in +create a _wallet_, to send and receive SOL tokens, and to participate in the cluster by delegating stake. To interact with a Solana cluster, we will use its command-line interface, also @@ -11,8 +13,10 @@ necessarily the easiest to use, but it provides the most direct, flexible, and secure access to your Solana accounts. ## Getting Started + To get started using the Solana Command Line (CLI) tools: - - [Install the Solana Tools](install-solana-cli-tools.md) - - [Choose a Cluster](choose-a-cluster.md) - - [Create a Wallet](../wallet-guide/cli.md) - - [Check out our CLI conventions](conventions.md) + +- [Install the Solana Tools](install-solana-cli-tools.md) +- [Choose a Cluster](choose-a-cluster.md) +- [Create a Wallet](../wallet-guide/cli.md) +- [Check out our CLI conventions](conventions.md) diff --git a/docs/src/cli/choose-a-cluster.md b/docs/src/cli/choose-a-cluster.md index 3c5d998da1..a2bc4cb2b4 100644 --- a/docs/src/cli/choose-a-cluster.md +++ b/docs/src/cli/choose-a-cluster.md @@ -1,8 +1,12 @@ -# Connecting to a Cluster +--- +title: Connecting to a Cluster +--- + See [Solana Clusters](../clusters.md) for general information about the available clusters. ## Configure the command-line tool + You can check what cluster the Solana command-line tool (CLI) is currently targeting by running the following command: @@ -10,11 +14,12 @@ running the following command: solana config get ``` -Use `solana config set` command to target a particular cluster. After setting +Use `solana config set` command to target a particular cluster. After setting a cluster target, any future subcommands will send/receive information from that cluster. For example to target the Devnet cluster, run: + ```bash solana config set --url https://devnet.solana.com ``` diff --git a/docs/src/cli/conventions.md b/docs/src/cli/conventions.md index d101cbd821..0f5d5acc8d 100644 --- a/docs/src/cli/conventions.md +++ b/docs/src/cli/conventions.md @@ -1,4 +1,6 @@ -# Using Solana CLI +--- +title: Using Solana CLI +--- Before running any Solana CLI commands, let's go over some conventions that you will see across all commands. First, the Solana CLI is actually a collection @@ -19,7 +21,7 @@ where you replace the text `` with the name of the command you want to learn more about. The command's usage message will typically contain words such as ``, -`` or ``. Each word is a placeholder for the *type* of +`` or ``. Each word is a placeholder for the _type_ of text you can execute the command with. For example, you can replace `` with a number such as `42` or `100.42`. You can replace `` with the base58 encoding of your public key, such as @@ -27,12 +29,13 @@ the base58 encoding of your public key, such as ## Keypair conventions -Many commands using the CLI tools require a value for a ``. The value +Many commands using the CLI tools require a value for a ``. The value you should use for the keypair depend on what type of [command line wallet you created](../wallet-guide/cli.md). 
For example, to display any wallet's address (also known as the keypair's pubkey), the CLI help document shows:
+
```bash
solana-keygen pubkey 
```
@@ -49,9 +52,11 @@ enter the word `ASK` and the program will prompt you to enter your seed words
when you run the command.

To display the wallet address of a Paper Wallet:
+
```bash
solana-keygen pubkey ASK
```
+
#### File System Wallet

With a file system wallet, the keypair is stored in a file on your computer.
@@ -59,6 +64,7 @@ Replace `` with the complete file path to the keypair file.
For example, if the file system keypair file location is `/home/solana/my_wallet.json`, to display the address, do:
+
```bash
solana-keygen pubkey /home/solana/my_wallet.json
```
@@ -68,6 +74,7 @@ solana-keygen pubkey /home/solana/my_wallet.json
If you chose a hardware wallet, use your [keypair URL](../hardware-wallets/README.md#specify-a-hardware-wallet-key), such as `usb://ledger?key=0`.
+
```bash
solana-keygen pubkey usb://ledger?key=0
-```
\ No newline at end of file
+```
diff --git a/docs/src/cli/delegate-stake.md b/docs/src/cli/delegate-stake.md
index 40cea498ab..bb2d5f602f 100644
--- a/docs/src/cli/delegate-stake.md
+++ b/docs/src/cli/delegate-stake.md
@@ -1,8 +1,14 @@
-# Delegate Stake
-This page describes the workflow and commands needed to create and manage stake
-accounts, and to delegate your stake accounts to a validator using the Solana
-command-line tools. The [stake accounts](../staking/stake-accounts.md)
-document provides an overview of stake account features and concepts.
+---
+title: Delegate Stake
+---
+
+After you have [received SOL](transfer-tokens.md), you might consider putting
+it to use by delegating _stake_ to a validator. Stake is what we call tokens
+in a _stake account_. Solana weights validator votes by the amount of stake
+delegated to them, which gives those validators more influence in determining
+the next valid block of transactions in the blockchain. Solana then generates
+new SOL periodically to reward stakers and validators. You earn more rewards
+the more stake you delegate.

## Create a Stake Account
To delegate stake, you will need to transfer some tokens into a stake account.
@@ -87,8 +93,7 @@ solana create-stake-account --from  --seed
` acts as the base address. The command derives a new address from the base address
-and seed string. To see what stake address the command will derive, use `solana
-create-address-with-seed`:
+and seed string. To see what stake address the command will derive, use `solana create-address-with-seed`:

```bash
solana create-address-with-seed --from  STAKE
@@ -190,6 +195,6 @@ keypair for the new account, and `` is the number of tokens to
transfer to the new account.

To split a stake account into a derived account address, use the `--seed`
-option. See
+option. See
[Derive Stake Account Addresses](#advanced-derive-stake-account-addresses)
for details.
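To make the seed-derived stake workflow in the patch above easier to follow, here is a minimal end-to-end sketch. The subcommands and the `--from`/`--seed` flags appear in the guide itself; the wallet path `~/my_wallet.json`, the seed string `stake:0`, the amount, the positional argument ordering, and the final inspection step are hypothetical placeholders, so treat the CLI's own `--help` output as the authoritative reference.

```bash
# Hypothetical walk-through of deriving and funding a seed-based stake account.
# Assumes a file system wallet at ~/my_wallet.json and the arbitrary seed "stake:0".

# 1. Preview the stake address the base keypair + seed combination will derive.
solana create-address-with-seed --from ~/my_wallet.json "stake:0" STAKE

# 2. Fund a stake account at that derived address (amount is a placeholder).
solana create-stake-account --from ~/my_wallet.json --seed "stake:0" \
    ~/my_wallet.json 0.5

# 3. The derived address can then be inspected like any other stake account.
solana stake-account <DERIVED_STAKE_ADDRESS>
```

Because the derived address is a deterministic function of the base pubkey, the seed string, and the stake program id, re-running the first command always reports the same address, which is what makes seed strings a practical bookkeeping convention for managing many stake accounts.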
diff --git a/docs/src/cli/install-solana-cli-tools.md b/docs/src/cli/install-solana-cli-tools.md index 210dbefe6b..3c438d42ec 100644 --- a/docs/src/cli/install-solana-cli-tools.md +++ b/docs/src/cli/install-solana-cli-tools.md @@ -1,28 +1,31 @@ -# Install the Solana Tool Suite +--- +title: Install the Solana Tool Suite +--- There are multiple ways to install the Solana tools on your computer depending on your preferred workflow: - - [Use Solana's Install Tool (Simplest option)](#use-solanas-install-tool) - - [Download Prebuilt Binaries](#download-prebuilt-binaries) - - [Build from Source](#build-from-source) + +- [Use Solana's Install Tool (Simplest option)](#use-solanas-install-tool) +- [Download Prebuilt Binaries](#download-prebuilt-binaries) +- [Build from Source](#build-from-source) ## Use Solana's Install Tool ### MacOS & Linux - - Open your favorite Terminal application +- Open your favorite Terminal application - - Install the Solana release -[LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your -machine by running: +- Install the Solana release + [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your + machine by running: ```bash curl -sSf https://raw.githubusercontent.com/solana-labs/solana/LATEST_SOLANA_RELEASE_VERSION/install/solana-install-init.sh | sh -s - LATEST_SOLANA_RELEASE_VERSION ``` - - If you are connecting to a different testnet, you can replace `LATEST_SOLANA_RELEASE_VERSION` with the -release tag matching the software version of your desired testnet, or replace it -with the named channel `stable`, `beta`, or `edge`. +- If you are connecting to a different testnet, you can replace `LATEST_SOLANA_RELEASE_VERSION` with the + release tag matching the software version of your desired testnet, or replace it + with the named channel `stable`, `beta`, or `edge`. - The following output indicates a successful update: @@ -36,59 +39,64 @@ Active release directory: /home/solana/.local/share/solana/install/active_releas Update successful ``` - - Depending on your system, the end of the installer messaging may prompt you - to - ```bash +- Depending on your system, the end of the installer messaging may prompt you + to + +```bash Please update your PATH environment variable to include the solana programs: ``` - - If you get the above message, copy and paste the recommended command below - it to update `PATH` - - Confirm you have the desired version of `solana` installed by running: - ```bash - solana --version + +- If you get the above message, copy and paste the recommended command below + it to update `PATH` +- Confirm you have the desired version of `solana` installed by running: + +```bash +solana --version ``` - - After a successful install, `solana-install update` may be used to easily -update the Solana software to a newer version at any time. +- After a successful install, `solana-install update` may be used to easily + update the Solana software to a newer version at any time. -*** +--- ###Windows - - Open a Command Prompt (`cmd.exe`) as an Administrator - - Search for Command Prompt in the Windows search bar. When the Command - Prompt app appears, right-click and select “Open as Administrator”. -If you are prompted by a pop-up window asking “Do you want to allow this app to -make changes to your device?”, click Yes. 
+- Open a Command Prompt (`cmd.exe`) as an Administrator - - Copy and paste the following command, then press Enter to download the Solana - installer into a temporary directory: + - Search for Command Prompt in the Windows search bar. When the Command + Prompt app appears, right-click and select “Open as Administrator”. + If you are prompted by a pop-up window asking “Do you want to allow this app to + make changes to your device?”, click Yes. + +- Copy and paste the following command, then press Enter to download the Solana + installer into a temporary directory: ```bash curl http://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-gnu.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs ``` - - Copy and paste the following command, then press Enter to install the latest - version of Solana. If you see a security pop-up by your system, please select - to allow the program to run. +- Copy and paste the following command, then press Enter to install the latest + version of Solana. If you see a security pop-up by your system, please select + to allow the program to run. ```bash C:\solana-install-tmp\solana-install-init.exe LATEST_SOLANA_RELEASE_VERSION ``` - - When the installer is finished, press Enter. +- When the installer is finished, press Enter. - - Close the command prompt window and re-open a new command prompt window as a -normal user - - Search for "Command Prompt" in the search bar, then left click on the -Command Prompt app icon, no need to run as Administrator) - - Confirm you have the desired version of `solana` installed by entering: - ```bash - solana --version +- Close the command prompt window and re-open a new command prompt window as a + normal user + - Search for "Command Prompt" in the search bar, then left click on the + Command Prompt app icon, no need to run as Administrator) +- Confirm you have the desired version of `solana` installed by entering: + +```bash +solana --version ``` - - After a successful install, `solana-install update` may be used to easily -update the Solana software to a newer version at any time. +- After a successful install, `solana-install update` may be used to easily + update the Solana software to a newer version at any time. ## Download Prebuilt Binaries @@ -99,7 +107,7 @@ manually download and install the binaries. Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), -download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the +download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the archive: ```bash @@ -112,7 +120,7 @@ export PATH=$PWD/bin:$PATH Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), -download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the +download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the archive: ```bash @@ -124,12 +132,12 @@ export PATH=$PWD/bin:$PATH ### Windows - Download the binaries by navigating to -[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), -download **solana-release-x86\_64-pc-windows-gnu.tar.bz2**, then extract the -archive using WinZip or similar. 
+ [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), + download **solana-release-x86_64-pc-windows-gnu.tar.bz2**, then extract the + archive using WinZip or similar. - Open a Command Prompt and navigate to the directory into which you extracted -the binaries and run: + the binaries and run: ```bash cd solana-release/ diff --git a/docs/src/cli/manage-stake-accounts.md b/docs/src/cli/manage-stake-accounts.md index a5532c924b..b07b0ad79e 100644 --- a/docs/src/cli/manage-stake-accounts.md +++ b/docs/src/cli/manage-stake-accounts.md @@ -1,4 +1,6 @@ -# Manage Stake Accounts +--- +title: Manage Stake Accounts +--- If you want to delegate stake to many different validators, you will need to create a separate stake account for each. If you follow the convention diff --git a/docs/src/cli/transfer-tokens.md b/docs/src/cli/transfer-tokens.md index 4ad17f4723..5111de3b02 100644 --- a/docs/src/cli/transfer-tokens.md +++ b/docs/src/cli/transfer-tokens.md @@ -1,10 +1,13 @@ -# Send and Receive Tokens +--- +title: Send and Receive Tokens +--- + This page decribes how to receive and send SOL tokens using the command line tools with a command line wallet such as a [paper wallet](../paper-wallet/README.md), a [file system wallet](../file-system-wallet/README.md), or a -[hardware wallet](../hardware-wallets/README.md). Before you begin, make sure +[hardware wallet](../hardware-wallets/README.md). Before you begin, make sure you have created a wallet and have access to its address (pubkey) and the -signing keypair. Check out our +signing keypair. Check out our [conventions for entering keypairs for different wallet types](../cli/conventions.md#keypair-conventions). ## Testing your Wallet @@ -13,15 +16,15 @@ Before sharing your public key with others, you may want to first ensure the key is valid and that you indeed hold the corresponding private key. In this example, we will create a second wallet in addition to your first wallet, -and then transfer some tokens to it. This will confirm that you can send and +and then transfer some tokens to it. This will confirm that you can send and receive tokens on your wallet type of choice. -This test example uses our Developer Testnet, called devnet. Tokens issued +This test example uses our Developer Testnet, called devnet. Tokens issued on devnet have **no** value, so don't worry if you lose them. #### Airdrop some tokens to get started -First, *airdrop* yourself some play tokens on the devnet. +First, _airdrop_ yourself some play tokens on the devnet. ```bash solana airdrop 10 --url https://devnet.solana.com @@ -85,6 +88,7 @@ where `` is either the public key from your keypair or the recipient's public key. #### Full example of test transfer + ```bash $ solana-keygen new --outfile my_solana_wallet.json # Creating my first wallet, a file system wallet Generating a new keypair @@ -130,7 +134,7 @@ $ solana balance 7S3P4HxJpyyigGzodYwHtCxZyUQe9JiBMHyRWXArAaKv --url https://devn To receive tokens, you will need an address for others to send tokens to. In Solana, the wallet address is the public key of a keypair. There are a variety of techniques for generating keypairs. The method you choose will depend on how -you choose to store keypairs. Keypairs are stored in wallets. Before receiving +you choose to store keypairs. Keypairs are stored in wallets. Before receiving tokens, you will need to [create a wallet](../wallet-guide/cli.md). Once completed, you should have a public key for each keypair you generated. 
The public key is a long string of base58 diff --git a/docs/src/cluster/README.md b/docs/src/cluster/README.md index 3c34106563..fc73263ac3 100644 --- a/docs/src/cluster/README.md +++ b/docs/src/cluster/README.md @@ -1,4 +1,6 @@ -# A Solana Cluster +--- +title: A Solana Cluster +--- A Solana cluster is a set of validators working together to serve client transactions and maintain the integrity of the ledger. Many clusters may coexist. When two clusters share a common genesis block, they attempt to converge. Otherwise, they simply ignore the existence of the other. Transactions sent to the wrong one are quietly rejected. In this section, we'll discuss how a cluster is created, how nodes join the cluster, how they share the ledger, how they ensure the ledger is replicated, and how they cope with buggy and malicious nodes. diff --git a/docs/src/cluster/bench-tps.md b/docs/src/cluster/bench-tps.md index eb65c47049..9e04000c5e 100644 --- a/docs/src/cluster/bench-tps.md +++ b/docs/src/cluster/bench-tps.md @@ -1,4 +1,6 @@ -# Benchmark a Cluster +--- +title: Benchmark a Cluster +--- The Solana git repository contains all the scripts you might need to spin up your own local testnet. Depending on what you're looking to achieve, you may want to run a different variation, as the full-fledged, performance-enhanced multinode testnet is considerably more complex to set up than a Rust-only, singlenode testnode. If you are looking to develop high-level features, such as experimenting with smart contracts, save yourself some setup headaches and stick to the Rust-only singlenode demo. If you're doing performance optimization of the transaction pipeline, consider the enhanced singlenode demo. If you're doing consensus work, you'll need at least a Rust-only multinode demo. If you want to reproduce our TPS metrics, run the enhanced multinode demo. @@ -92,17 +94,17 @@ What just happened? The client demo spins up several threads to send 500,000 tra ### Testnet Debugging -There are some useful debug messages in the code, you can enable them on a per-module and per-level basis. Before running a leader or validator set the normal RUST\_LOG environment variable. +There are some useful debug messages in the code, you can enable them on a per-module and per-level basis. Before running a leader or validator set the normal RUST_LOG environment variable. For example -* To enable `info` everywhere and `debug` only in the solana::banking\_stage module: +- To enable `info` everywhere and `debug` only in the solana::banking_stage module: ```bash $ export RUST_LOG=solana=info,solana::banking_stage=debug ``` -* To enable BPF program logging: +- To enable BPF program logging: ```bash $ export RUST_LOG=solana_bpf_loader=trace diff --git a/docs/src/cluster/fork-generation.md b/docs/src/cluster/fork-generation.md index 01d97390dd..e89aef6214 100644 --- a/docs/src/cluster/fork-generation.md +++ b/docs/src/cluster/fork-generation.md @@ -1,4 +1,6 @@ -# Fork Generation +--- +title: Fork Generation +--- This section describes how forks naturally occur as a consequence of [leader rotation](leader-rotation.md). @@ -58,7 +60,7 @@ Validators vote based on a greedy choice to maximize their reward described in [ The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader slots, and `E`s represent entries from that leader during that leader's slot. The `x`s represent ticks only, and time flows downwards in the diagram. 
-![Fork generation](../.gitbook/assets/fork-generation.svg)
+![Fork generation](/img/fork-generation.svg)

Note that an `E` appearing on 2 forks at the same slot is a slashable condition, so a validator observing `E3` and `E3'` can slash L3 and safely choose `x` for that slot. Once a validator commits to a fork, other forks can be discarded below that tick count. For any slot, validators need only consider a single "has entries" chain or a "ticks only" chain to be proposed by a leader. But multiple virtual entries may overlap as they link back to a previous slot.

@@ -66,10 +68,10 @@ Note that an `E` appearing on 2 forks at the same slot is a slashable condition,

It's useful to consider leader rotation over PoH tick count as time division of the job of encoding state for the cluster. The following table presents the above tree of forks as a time-divided ledger.

-| leader slot | L1 | L2 | L3 | L4 | L5 |
-| :--- | :--- | :--- | :--- | :--- | :--- |
-| data | E1 | E2 | E3 | E4 | E5 |
-| ticks since prev | | | | x | xx |
+| leader slot      | L1  | L2  | L3  | L4  | L5  |
+| :--------------- | :-- | :-- | :-- | :-- | :-- |
+| data             | E1  | E2  | E3  | E4  | E5  |
+| ticks since prev |     |     |     | x   | xx  |

Note that only data from leader L3 will be accepted during leader slot L3. Data from L3 may include "catchup" ticks back to a slot other than L2 if L3 did not observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH entries.

diff --git a/docs/src/cluster/leader-rotation.md b/docs/src/cluster/leader-rotation.md
index 56e13f8f02..114117591c 100644
--- a/docs/src/cluster/leader-rotation.md
+++ b/docs/src/cluster/leader-rotation.md
@@ -1,4 +1,6 @@
-# Leader Rotation
+---
+title: Leader Rotation
+---

At any given moment, a cluster expects only one validator to produce ledger entries. By having only one leader at a time, all validators are able to replay identical copies of the ledger. The drawback of only one leader at a time, however, is that a malicious leader is capable of censoring votes and transactions. Since censoring cannot be distinguished from the network dropping packets, the cluster cannot simply elect a single node to hold the leader role indefinitely. Instead, the cluster minimizes the influence of a malicious leader by rotating which node takes the lead.

@@ -31,8 +33,8 @@ Two partitions that are generating half of the blocks each. Neither is coming to

In this unstable scenario, multiple valid leader schedules exist.

-* A leader schedule is generated for every fork whose direct parent is in the previous epoch.
-* The leader schedule is valid after the start of the next epoch for descendant forks until it is updated.
+- A leader schedule is generated for every fork whose direct parent is in the previous epoch.
+- The leader schedule is valid after the start of the next epoch for descendant forks until it is updated.

Each partition's schedule will diverge after the partition lasts more than an epoch. For this reason, the epoch duration should be selected to be much larger than slot time and the expected length for a fork to be committed to root.

@@ -73,8 +75,8 @@ The seed that is selected is predictable but unbiasable. There is no grinding at

A leader can bias the active set by censoring validator votes.
Two possible ways exist for leaders to censor the active set:

-* Ignore votes from validators
-* Refuse to vote for blocks with votes from validators
+- Ignore votes from validators
+- Refuse to vote for blocks with votes from validators

To reduce the likelihood of censorship, the active set is calculated at the leader schedule offset boundary over an _active set sampling duration_. The active set sampling duration is long enough such that votes will have been collected by multiple leaders.

diff --git a/docs/src/cluster/managing-forks.md b/docs/src/cluster/managing-forks.md
index e00ff6cc3b..d868ebdd2c 100644
--- a/docs/src/cluster/managing-forks.md
+++ b/docs/src/cluster/managing-forks.md
@@ -1,4 +1,6 @@
-# Managing Forks
+---
+title: Managing Forks
+---

The ledger is permitted to fork at slot boundaries. The resulting data structure forms a tree called a _blockstore_. When the validator interprets the blockstore, it must maintain state for each fork in the chain. We call each instance an _active fork_. It is the responsibility of a validator to weigh those forks, such that it may eventually select a fork.

@@ -8,14 +10,14 @@ A validator selects a fork by submiting a vote to a slot leader on that fork. Th

An active fork is a sequence of checkpoints that has a length at least one longer than the rollback depth. The shortest fork will have a length exactly one longer than the rollback depth. For example:

-![Forks](../.gitbook/assets/forks.svg)
+![Forks](/img/forks.svg)

The following sequences are _active forks_:

-* {4, 2, 1}
-* {5, 2, 1}
-* {6, 3, 1}
-* {7, 3, 1}
+- {4, 2, 1}
+- {5, 2, 1}
+- {6, 3, 1}
+- {7, 3, 1}

## Pruning and Squashing

A validator may vote on any checkpoint in the tree. In the diagram above, that's
Starting from the example above, with a rollback depth of 2, consider a vote on 5 versus a vote on 6. First, a vote on 5:

-![Forks after pruning](../.gitbook/assets/forks-pruned.svg)
+![Forks after pruning](/img/forks-pruned.svg)

The new root is 2, and any active forks that are not descendants from 2 are pruned.

Alternatively, a vote on 6:

-![Forks](../.gitbook/assets/forks-pruned2.svg)
+![Forks](/img/forks-pruned2.svg)

The tree remains with a root of 1, since the active fork starting at 6 is only 2 checkpoints from the root.

diff --git a/docs/src/cluster/performance-metrics.md b/docs/src/cluster/performance-metrics.md
index 83ec1d3a0f..575c46a26e 100644
--- a/docs/src/cluster/performance-metrics.md
+++ b/docs/src/cluster/performance-metrics.md
@@ -1,4 +1,6 @@
-# Performance Metrics
+---
+title: Performance Metrics
+---

Solana cluster performance is measured as the average number of transactions per second that the network can sustain \(TPS\), and how long it takes for a transaction to be confirmed by a super majority of the cluster \(Confirmation Time\).

@@ -21,4 +23,3 @@ The validator software is deployed to GCP n1-standard-16 instances with 1TB pd-s

solana-bench-tps is started after the network converges from a client machine with an n1-standard-16 CPU-only instance with the following arguments: `--tx\_count=50000 --thread-batch-sleep 1000`

TPS and confirmation metrics are captured from the dashboard numbers over a 5 minute average of when the bench-tps transfer stage begins.
-
diff --git a/docs/src/cluster/stake-delegation-and-rewards.md b/docs/src/cluster/stake-delegation-and-rewards.md
index 9a62482ed0..1d8afe0fe3 100644
--- a/docs/src/cluster/stake-delegation-and-rewards.md
+++ b/docs/src/cluster/stake-delegation-and-rewards.md
@@ -1,4 +1,6 @@
-# Stake Delegation and Rewards
+---
+title: Stake Delegation and Rewards
+---

Stakers are rewarded for helping to validate the ledger. They do this by delegating their stake to validator nodes. Those validators do the legwork of replaying the ledger and send votes to a per-node vote account to which stakers can delegate their stakes. The rest of the cluster uses those stake-weighted votes to select a block when forks arise. Both the validator and staker need some economic incentive to play their part. The validator needs to be compensated for its hardware and the staker needs to be compensated for the risk of getting its stake slashed. The economics are covered in [staking rewards](../implemented-proposals/staking-rewards.md). This section, on the other hand, describes the underlying mechanics of its implementation.

@@ -22,18 +24,18 @@ The rewards process is split into two on-chain programs. The Vote program solves

VoteState is the current state of all the votes the validator has submitted to the network. VoteState contains the following state information:

-* `votes` - The submitted votes data structure.
-* `credits` - The total number of rewards this vote program has generated over its lifetime.
-* `root_slot` - The last slot to reach the full lockout commitment necessary for rewards.
-* `commission` - The commission taken by this VoteState for any rewards claimed by staker's Stake accounts. This is the percentage ceiling of the reward.
-* Account::lamports - The accumulated lamports from the commission. These do not count as stakes.
-* `authorized_voter` - Only this identity is authorized to submit votes. This field can only modified by this identity.
-* `node_pubkey` - The Solana node that votes in this account.
-* `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's address and the authorized vote signer
+- `votes` - The submitted votes data structure.
+- `credits` - The total number of rewards this vote program has generated over its lifetime.
+- `root_slot` - The last slot to reach the full lockout commitment necessary for rewards.
+- `commission` - The commission taken by this VoteState for any rewards claimed by staker's Stake accounts. This is the percentage ceiling of the reward.
+- Account::lamports - The accumulated lamports from the commission. These do not count as stakes.
+- `authorized_voter` - Only this identity is authorized to submit votes. This field can only be modified by this identity.
+- `node_pubkey` - The Solana node that votes in this account.
+- `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's address and the authorized vote signer

### VoteInstruction::Initialize\(VoteInit\)

-* `account[0]` - RW - The VoteState
+- `account[0]` - RW - The VoteState

`VoteInit` carries the new vote account's `node_pubkey`, `authorized_voter`, `authorized_withdrawer`, and `commission`

@@ -43,16 +45,16 @@ VoteState is the current state of all the votes the validator has submitted to t

Updates the account with a new authorized voter or withdrawer, according to the VoteAuthorize parameter \(`Voter` or `Withdrawer`\).
The transaction must be by signed by the Vote account's current `authorized_voter` or `authorized_withdrawer`. -* `account[0]` - RW - The VoteState +- `account[0]` - RW - The VoteState `VoteState::authorized_voter` or `authorized_withdrawer` is set to to `Pubkey`. ### VoteInstruction::Vote\(Vote\) -* `account[0]` - RW - The VoteState +- `account[0]` - RW - The VoteState `VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules see [Tower BFT](../implemented-proposals/tower-bft.md) -* `account[1]` - RO - `sysvar::slot_hashes` A list of some N most recent slots and their hashes for the vote to be verified against. -* `account[2]` - RO - `sysvar::clock` The current network time, expressed in slots, epochs. +- `account[1]` - RO - `sysvar::slot_hashes` A list of some N most recent slots and their hashes for the vote to be verified against. +- `account[2]` - RO - `sysvar::clock` The current network time, expressed in slots, epochs. ### StakeState @@ -62,15 +64,15 @@ A StakeState takes one of four forms, StakeState::Uninitialized, StakeState::Ini StakeState::Stake is the current delegation preference of the **staker** and contains the following state information: -* Account::lamports - The lamports available for staking. -* `stake` - the staked amount \(subject to warm up and cool down\) for generating rewards, always less than or equal to Account::lamports -* `voter_pubkey` - The pubkey of the VoteState instance the lamports are delegated to. -* `credits_observed` - The total credits claimed over the lifetime of the program. -* `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warm up. -* `deactivated` - the epoch at which this stake was de-activated, some cool down epochs are required before the account is fully deactivated, and the stake available for withdrawal +- Account::lamports - The lamports available for staking. +- `stake` - the staked amount \(subject to warm up and cool down\) for generating rewards, always less than or equal to Account::lamports +- `voter_pubkey` - The pubkey of the VoteState instance the lamports are delegated to. +- `credits_observed` - The total credits claimed over the lifetime of the program. +- `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warm up. +- `deactivated` - the epoch at which this stake was de-activated, some cool down epochs are required before the account is fully deactivated, and the stake available for withdrawal -* `authorized_staker` - the pubkey of the entity that must sign delegation, activation, and deactivation transactions -* `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's address, and the authorized staker +- `authorized_staker` - the pubkey of the entity that must sign delegation, activation, and deactivation transactions +- `authorized_withdrawer` - the identity of the entity in charge of the lamports of this account, separate from the account's address, and the authorized staker ### StakeState::RewardsPool @@ -82,17 +84,17 @@ The Stakes and the RewardsPool are accounts that are owned by the same `Stake` p The Stake account is moved from Initialized to StakeState::Stake form, or from a deactivated (i.e. fully cooled-down) StakeState::Stake to activated StakeState::Stake. This is how stakers choose the vote account and validator node to which their stake account lamports are delegated. 
The transaction must be signed by the stake's `authorized_staker`. -* `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX -* `account[1]` - R - The VoteState instance. -* `account[2]` - R - sysvar::clock account, carries information about current Bank epoch -* `account[3]` - R - sysvar::stakehistory account, carries information about stake history -* `account[4]` - R - stake::Config accoount, carries warmup, cooldown, and slashing configuration +- `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX +- `account[1]` - R - The VoteState instance. +- `account[2]` - R - sysvar::clock account, carries information about current Bank epoch +- `account[3]` - R - sysvar::stakehistory account, carries information about stake history +- `account[4]` - R - stake::Config accoount, carries warmup, cooldown, and slashing configuration ### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\) -Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be by signed by the Stakee account's current `authorized_staker` or `authorized_withdrawer`. Any stake lock-up must have expired, or the lock-up custodian must also sign the transaction. +Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be by signed by the Stakee account's current `authorized_staker` or `authorized_withdrawer`. Any stake lock-up must have expired, or the lock-up custodian must also sign the transaction. -* `account[0]` - RW - The StakeState +- `account[0]` - RW - The StakeState `StakeState::authorized_staker` or `authorized_withdrawer` is set to to `Pubkey`. @@ -101,8 +103,8 @@ Updates the account with a new authorized staker or withdrawer, according to the A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down. The transaction must be signed by the stake's `authorized_staker`. -* `account[0]` - RW - The StakeState::Stake instance that is deactivating. -* `account[1]` - R - sysvar::clock account from the Bank that carries current epoch +- `account[0]` - RW - The StakeState::Stake instance that is deactivating. +- `account[1]` - R - sysvar::clock account from the Bank that carries current epoch StakeState::Stake::deactivated is set to the current epoch + cool down. The account's stake will ramp down to zero by that epoch, and Account::lamports will be available for withdrawal. @@ -110,21 +112,21 @@ StakeState::Stake::deactivated is set to the current epoch + cool down. The acco Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn. 
The transaction must be signed by the stake's `authorized_withdrawer`. -* `account[0]` - RW - The StakeState::Stake from which to withdraw. -* `account[1]` - RW - Account that should be credited with the withdrawn lamports. -* `account[2]` - R - sysvar::clock account from the Bank that carries current epoch, to calculate stake. -* `account[3]` - R - sysvar::stake\_history account from the Bank that carries stake warmup/cooldown history +- `account[0]` - RW - The StakeState::Stake from which to withdraw. +- `account[1]` - RW - Account that should be credited with the withdrawn lamports. +- `account[2]` - R - sysvar::clock account from the Bank that carries current epoch, to calculate stake. +- `account[3]` - R - sysvar::stake_history account from the Bank that carries stake warmup/cooldown history ## Benefits of the design -* Single vote for all the stakers. -* Clearing of the credit variable is not necessary for claiming rewards. -* Each delegated stake can claim its rewards independently. -* Commission for the work is deposited when a reward is claimed by the delegated stake. +- Single vote for all the stakers. +- Clearing of the credit variable is not necessary for claiming rewards. +- Each delegated stake can claim its rewards independently. +- Commission for the work is deposited when a reward is claimed by the delegated stake. ## Example Callflow -![Passive Staking Callflow](../.gitbook/assets/passive-staking-callflow.svg) +![Passive Staking Callflow](/img/passive-staking-callflow.svg) ## Staking Rewards @@ -171,22 +173,22 @@ Consider the situation of a single stake of 1,000 activated at epoch N, with net At epoch N+1, the amount available to be activated for the network is 400 \(20% of 200\), and at epoch N, this example stake is the only stake activating, and so is entitled to all of the warmup room available. | epoch | effective | activating | total effective | total activating | -| :--- | ---: | ---: | ---: | ---: | -| N-1 | | | 2,000 | 0 | -| N | 0 | 1,000 | 2,000 | 1,000 | -| N+1 | 400 | 600 | 2,400 | 600 | -| N+2 | 880 | 120 | 2,880 | 120 | -| N+3 | 1000 | 0 | 3,000 | 0 | +| :---- | --------: | ---------: | --------------: | ---------------: | +| N-1 | | | 2,000 | 0 | +| N | 0 | 1,000 | 2,000 | 1,000 | +| N+1 | 400 | 600 | 2,400 | 600 | +| N+2 | 880 | 120 | 2,880 | 120 | +| N+3 | 1000 | 0 | 3,000 | 0 | Were 2 stakes \(X and Y\) to activate at epoch N, they would be awarded a portion of the 20% in proportion to their stakes. At each epoch effective and activating for each stake is a function of the previous epoch's state. | epoch | X eff | X act | Y eff | Y act | total effective | total activating | -| :--- | ---: | ---: | ---: | ---: | ---: | ---: | -| N-1 | | | | | 2,000 | 0 | -| N | 0 | 1,000 | 0 | 200 | 2,000 | 1,200 | -| N+1 | 333 | 667 | 67 | 133 | 2,400 | 800 | -| N+2 | 733 | 267 | 146 | 54 | 2,880 | 321 | -| N+3 | 1000 | 0 | 200 | 0 | 3,200 | 0 | +| :---- | ----: | ----: | ----: | ----: | --------------: | ---------------: | +| N-1 | | | | | 2,000 | 0 | +| N | 0 | 1,000 | 0 | 200 | 2,000 | 1,200 | +| N+1 | 333 | 667 | 67 | 133 | 2,400 | 800 | +| N+2 | 733 | 267 | 146 | 54 | 2,880 | 321 | +| N+3 | 1000 | 0 | 200 | 0 | 3,200 | 0 | ### Withdrawal @@ -194,4 +196,4 @@ Only lamports in excess of effective+activating stake may be withdrawn at any ti ### Lock-up -Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. 
the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state. Changing the authorized staker or withdrawer is also subject to lock-up, as such an operation is effectively a transfer. +Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state. Changing the authorized staker or withdrawer is also subject to lock-up, as such an operation is effectively a transfer. diff --git a/docs/src/cluster/synchronization.md b/docs/src/cluster/synchronization.md index cd51d27fe9..4fe230c63b 100644 --- a/docs/src/cluster/synchronization.md +++ b/docs/src/cluster/synchronization.md @@ -1,4 +1,6 @@ -# Synchronization +--- +title: Synchronization +--- Fast, reliable synchronization is the biggest reason Solana is able to achieve such high throughput. Traditional blockchains synchronize on large chunks of transactions called blocks. By synchronizing on blocks, a transaction cannot be processed until a duration called "block time" has passed. In Proof of Work consensus, these block times need to be very large \(~10 minutes\) to minimize the odds of multiple validators producing a new valid block at the same time. There's no such constraint in Proof of Stake consensus, but without reliable timestamps, a validator cannot determine the order of incoming blocks. The popular workaround is to tag each block with a [wallclock timestamp](https://en.bitcoin.it/wiki/Block_timestamp). Because of clock drift and variance in network latencies, the timestamp is only accurate within an hour or two. To workaround the workaround, these systems lengthen block times to provide reasonable certainty that the median timestamp on each block is always increasing. @@ -22,6 +24,5 @@ Proof of History is not a consensus mechanism, but it is used to improve the per ## More on Proof of History -* [water clock analogy](https://medium.com/solana-labs/proof-of-history-explained-by-a-water-clock-e682183417b8) -* [Proof of History overview](https://medium.com/solana-labs/proof-of-history-a-clock-for-blockchain-cf47a61a9274) - +- [water clock analogy](https://medium.com/solana-labs/proof-of-history-explained-by-a-water-clock-e682183417b8) +- [Proof of History overview](https://medium.com/solana-labs/proof-of-history-a-clock-for-blockchain-cf47a61a9274) diff --git a/docs/src/cluster/turbine-block-propagation.md b/docs/src/cluster/turbine-block-propagation.md index 4fadff654b..68dd9aea78 100644 --- a/docs/src/cluster/turbine-block-propagation.md +++ b/docs/src/cluster/turbine-block-propagation.md @@ -1,4 +1,6 @@ -# Turbine Block Propagation +--- +title: Turbine Block Propagation +--- A Solana cluster uses a multi-layer block propagation mechanism called _Turbine_ to broadcast transaction shreds to all nodes with minimal amount of duplicate messages. The cluster divides itself into small collections of nodes, called _neighborhoods_. 
Each node is responsible for sharing any data it receives with the other nodes in its neighborhood, as well as propagating the data on to a small set of nodes in other neighborhoods. This way each node only has to communicate with a small number of nodes. @@ -20,15 +22,15 @@ This way each node only has to communicate with a maximum of `2 * DATA_PLANE_FAN The following diagram shows how the Leader sends shreds with a Fanout of 2 to Neighborhood 0 in Layer 0 and how the nodes in Neighborhood 0 share their data with each other. -![Leader sends shreds to Neighborhood 0 in Layer 0](../.gitbook/assets/data-plane-seeding.svg) +![Leader sends shreds to Neighborhood 0 in Layer 0](/img/data-plane-seeding.svg) The following diagram shows how Neighborhood 0 fans out to Neighborhoods 1 and 2. -![Neighborhood 0 Fanout to Neighborhood 1 and 2](../.gitbook/assets/data-plane-fanout.svg) +![Neighborhood 0 Fanout to Neighborhood 1 and 2](/img/data-plane-fanout.svg) Finally, the following diagram shows a two layer cluster with a Fanout of 2. -![Two layer cluster with a Fanout of 2](../.gitbook/assets/data-plane.svg) +![Two layer cluster with a Fanout of 2](/img/data-plane.svg) ### Configuration Values @@ -38,59 +40,62 @@ Currently, configuration is set when the cluster is launched. In the future, the ## Calculating the required FEC rate -Turbine relies on retransmission of packets between validators. Due to +Turbine relies on retransmission of packets between validators. Due to retransmission, any network wide packet loss is compounded, and the probability of the packet failing to reach its destination increases -on each hop. The FEC rate needs to take into account the network wide +on each hop. The FEC rate needs to take into account the network wide packet loss, and the propagation depth. A shred group is the set of data and coding packets that can be used -to reconstruct each other. Each shred group has a chance of failure, +to reconstruct each other. Each shred group has a chance of failure, based on the likelihood of the number of packets failing that exceeds the FEC rate. If a validator fails to reconstruct the shred group, then the block cannot be reconstructed, and the validator has to rely on repair to fix up the blocks. The probability of the shred group failing can be computed using the -binomial distribution. If the FEC rate is `16:4`, then the group size +binomial distribution. If the FEC rate is `16:4`, then the group size is 20, and more than 4 of the shreds must fail for the group to fail. This is equal to the sum of the probabilities of 5 or more of the 20 shreds failing.
Probability of a block succeeding in Turbine: -* Probability of packet failure: `P = 1 - (1 - network_packet_loss_rate)^2` -* FEC rate: `K:M` -* Number of trials: `N = K + M` -* Shred group failure rate: `S = SUM of i=0 -> M for binomial(prob_failure = P, trials = N, failures = i)` -* Shreds per block: `G` -* Block success rate: `B = (1 - S) ^ (G / N) ` -* Binomial distribution for exactly `i` results with probability of P in N trials is defined as `(N choose i) * P^i * (1 - P)^(N-i)` +- Probability of packet failure: `P = 1 - (1 - network_packet_loss_rate)^2` +- FEC rate: `K:M` +- Number of trials: `N = K + M` +- Shred group failure rate: `S = 1 - SUM of i=0 -> M for binomial(prob_failure = P, trials = N, failures = i)`, i.e. the probability that more than `M` of the `N` shreds are lost +- Shreds per block: `G` +- Block success rate: `B = (1 - S) ^ (G / N)` +- Binomial distribution for exactly `i` results with probability of P in N trials is defined as `(N choose i) * P^i * (1 - P)^(N-i)` For example: -* Network packet loss rate is 15%. -* 50kpts network generates 6400 shreds per second. -* FEC rate increases the total shres per block by the FEC ratio. +- Network packet loss rate is 15%. +- 50kpts network generates 6400 shreds per second. +- FEC rate increases the total shreds per block by the FEC ratio. With a FEC rate: `16:4` -* `G = 8000` -* `P = 1 - 0.85 * 0.85 = 1 - 0.7225 = 0.2775` -* `S = SUM of i=0 -> 4 for binomial(prob_failure = 0.2775, trials = 20, failures = i) = 0.689414` -* `B = (1 - 0.689) ^ (8000 / 20) = 10^-203` + +- `G = 8000` +- `P = 1 - 0.85 * 0.85 = 1 - 0.7225 = 0.2775` +- `S = 1 - SUM of i=0 -> 4 for binomial(prob_failure = 0.2775, trials = 20, failures = i) = 0.689414` +- `B = (1 - 0.689) ^ (8000 / 20) = 10^-203` With FEC rate of `16:16` -* `G = 12800` -* `S = SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.002132` -* `B = (1 - 0.002132) ^ (12800 / 32) = 0.42583` + +- `G = 12800` +- `S = 1 - SUM of i=0 -> 16 for binomial(prob_failure = 0.2775, trials = 32, failures = i) = 0.002132` +- `B = (1 - 0.002132) ^ (12800 / 32) = 0.42583` With FEC rate of `32:32` -* `G = 12800` -* `S = SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.000048` -* `B = (1 - 0.000048) ^ (12800 / 64) = 0.99045` + +- `G = 12800` +- `S = 1 - SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.000048` +- `B = (1 - 0.000048) ^ (12800 / 64) = 0.99045` ## Neighborhoods The following diagram shows how two neighborhoods in different layers interact. To cripple a neighborhood, enough nodes \(erasure codes +1\) from the neighborhood above need to fail. Since each neighborhood receives shreds from multiple nodes in a neighborhood in the upper layer, we'd need a big network failure in the upper layers to end up with incomplete data. -![Inner workings of a neighborhood](../.gitbook/assets/data-plane-neighborhood.svg) +![Inner workings of a neighborhood](/img/data-plane-neighborhood.svg) diff --git a/docs/src/cluster/vote-signing.md b/docs/src/cluster/vote-signing.md index 7d1a64a5cf..976df0e64b 100644 --- a/docs/src/cluster/vote-signing.md +++ b/docs/src/cluster/vote-signing.md @@ -1,4 +1,6 @@ -# Secure Vote Signing +--- +title: Secure Vote Signing +--- A validator receives entries from the current leader and submits votes confirming those entries are valid. This vote submission presents a security challenge, because forged votes that violate consensus rules could be used to slash the validator's stake.
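The Turbine FEC arithmetic in the section above can be checked with a short, self-contained sketch (an illustration, not cluster code). It treats the shred-group failure rate as the probability that more than `M` of the `N = K + M` shreds in a group are lost, which reproduces the example values for the `16:4`, `16:16`, and `32:32` rates; the function names are illustrative.

```rust
/// C(n, k) * p^k * (1 - p)^(n - k), with the coefficient built iteratively to stay in f64 range.
fn binomial_pmf(n: u64, k: u64, p: f64) -> f64 {
    let mut coeff = 1.0_f64;
    for i in 0..k {
        coeff *= (n - i) as f64 / (i + 1) as f64;
    }
    coeff * p.powi(k as i32) * (1.0 - p).powi((n - k) as i32)
}

/// Probability that more than `m` of the `k + m` shreds in a group are lost,
/// with per-packet loss compounded over two hops: P = 1 - (1 - loss)^2.
fn shred_group_failure_rate(k: u64, m: u64, packet_loss: f64) -> f64 {
    let n = k + m;
    let p = 1.0 - (1.0 - packet_loss) * (1.0 - packet_loss);
    let recoverable: f64 = (0..=m).map(|i| binomial_pmf(n, i, p)).sum();
    1.0 - recoverable
}

/// B = (1 - S) ^ (G / N): every shred group in the block must be recoverable.
fn block_success_rate(k: u64, m: u64, packet_loss: f64, shreds_per_block: u64) -> f64 {
    let n = k + m;
    let s = shred_group_failure_rate(k, m, packet_loss);
    (1.0 - s).powf(shreds_per_block as f64 / n as f64)
}

fn main() {
    // 15% network packet loss; block sizes follow the examples above.
    for &(k, m, g) in &[(16u64, 4u64, 8000u64), (16, 16, 12800), (32, 32, 12800)] {
        println!(
            "{:2}:{:2}  S = {:.6}  B = {:e}",
            k,
            m,
            shred_group_failure_rate(k, m, 0.15),
            block_success_rate(k, m, 0.15, g),
        );
    }
}
```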
@@ -20,30 +22,30 @@ Currently, there is a 1:1 relationship between validators and vote signers, and The vote signing service consists of a JSON RPC server and a request processor. At startup, the service starts the RPC server at a configured port and waits for validator requests. It expects the following type of requests: 1. Register a new validator node -* The request must contain validator's identity \(public key\) -* The request must be signed with the validator's private key -* The service drops the request if signature of the request cannot be +- The request must contain validator's identity \(public key\) +- The request must be signed with the validator's private key +- The service drops the request if signature of the request cannot be verified -* The service creates a new voting asymmetric key for the validator, and +- The service creates a new voting asymmetric key for the validator, and returns the public key as a response -* If a validator tries to register again, the service returns the public key +- If a validator tries to register again, the service returns the public key from the pre-existing keypair 1. Sign a vote -* The request must contain a voting transaction and all verification data -* The request must be signed with the validator's private key -* The service drops the request if signature of the request cannot be +- The request must contain a voting transaction and all verification data +- The request must be signed with the validator's private key +- The service drops the request if signature of the request cannot be verified -* The service verifies the voting data -* The service returns a signature for the transaction +- The service verifies the voting data +- The service returns a signature for the transaction ## Validator voting @@ -64,4 +66,3 @@ The validator looks up the votes submitted by all the nodes in the cluster for t ### New Vote Signing The validator creates a "new vote" transaction and sends it to the signing service using JSON RPC. The RPC request also includes the vote verification data. On success, the RPC call returns the signature for the vote. On failure, RPC call returns the failure code. - diff --git a/docs/src/clusters.md b/docs/src/clusters.md index f313249c21..6d4f02d35a 100644 --- a/docs/src/clusters.md +++ b/docs/src/clusters.md @@ -1,34 +1,40 @@ -# Solana Clusters +--- +title: Solana Clusters +--- + Solana maintains several different clusters with different purposes. Before you begin make sure you have first [installed the Solana command line tools](cli/install-solana-cli-tools.md) Explorers: -* [http://explorer.solana.com/](https://explorer.solana.com/). -* [http://solanabeach.io/](http://solanabeach.io/). + +- [http://explorer.solana.com/](https://explorer.solana.com/). +- [http://solanabeach.io/](http://solanabeach.io/). ## Devnet -* Devnet serves as a playground for anyone who wants to take Solana for a -test drive, as a user, token holder, app developer, or validator. -* Application developers should target Devnet. -* Potential validators should first target Devnet. 
-* Key differences between Devnet and Mainnet Beta: - * Devnet tokens are **not real** - * Devnet includes a token faucet for airdrops for application testing - * Devnet may be subject to ledger resets - * Devnet typically runs a newer software version than Mainnet Beta - * Devnet may be maintained by different validators than Mainnet Beta - * Gossip entrypoint for Devnet: `devnet.solana.com:8001` - * RPC URL for Devnet: `https://devnet.solana.com` +- Devnet serves as a playground for anyone who wants to take Solana for a + test drive, as a user, token holder, app developer, or validator. +- Application developers should target Devnet. +- Potential validators should first target Devnet. +- Key differences between Devnet and Mainnet Beta: + - Devnet tokens are **not real** + - Devnet includes a token faucet for airdrops for application testing + - Devnet may be subject to ledger resets + - Devnet typically runs a newer software version than Mainnet Beta + - Devnet may be maintained by different validators than Mainnet Beta +- Gossip entrypoint for Devnet: `devnet.solana.com:8001` +- RPC URL for Devnet: `https://devnet.solana.com` ##### Example `solana` command-line configuration + ```bash solana config set --url https://devnet.solana.com ``` ##### Example `solana-validator` command-line + ```bash $ solana-validator \ --identity ~/validator-keypair.json \ @@ -46,29 +52,30 @@ $ solana-validator \ The `--trusted-validator`s is operated by Solana - ## Testnet -* Testnet is where we stress test recent release features on a live -cluster, particularly focused on network performance, stability and validator -behavior. -* [Tour de SOL](tour-de-sol/README.md) initiative runs on Testnet, where we -encourage malicious behavior and attacks on the network to help us find and -squash bugs or network vulnerabilities. -* Testnet tokens are **not real** -* Testnet may be subject to ledger resets. -* Testnet typically runs a newer software release than both Devnet and -Mainnet Beta -* Testnet may be maintained by different validators than Mainnet Beta -* Gossip entrypoint for Testnet: `35.203.170.30:8001` -* RPC URL for Testnet: `https://testnet.solana.com` +- Testnet is where we stress test recent release features on a live + cluster, particularly focused on network performance, stability and validator + behavior. +- [Tour de SOL](tour-de-sol/README.md) initiative runs on Testnet, where we + encourage malicious behavior and attacks on the network to help us find and + squash bugs or network vulnerabilities. +- Testnet tokens are **not real** +- Testnet may be subject to ledger resets. 
+- Testnet typically runs a newer software release than both Devnet and + Mainnet Beta +- Testnet may be maintained by different validators than Mainnet Beta +- Gossip entrypoint for Testnet: `35.203.170.30:8001` +- RPC URL for Testnet: `https://testnet.solana.com` ##### Example `solana` command-line configuration + ```bash solana config set --url https://testnet.solana.com ``` ##### Example `solana-validator` command-line + ```bash $ solana-validator \ --identity ~/validator-keypair.json \ @@ -87,28 +94,33 @@ $ solana-validator \ ``` The identity of the `--trusted-validator`s are: -* `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - testnet.solana.com (Solana) -* `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One -* `9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv` - Algo|Stake + +- `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - testnet.solana.com (Solana) +- `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One +- `9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv` - Algo|Stake ## Mainnet Beta + A permissionless, persistent cluster for early token holders and launch partners. Currently smart contracts, rewards, and inflation are disabled. - * Tokens that are issued on Mainnet Beta are **real** SOL - * If you have paid money to purchase/be issued tokens, such as through our - CoinList auction, these tokens will be transferred on Mainnet Beta. - * Note: If you are using a non-command-line wallet such as - [Trust Wallet](wallet-guide/trust-wallet.md), - the wallet will always be connecting to Mainnet Beta. - * Gossip entrypoint for Mainnet Beta: `mainnet-beta.solana.com:8001` - * RPC URL for Mainnet Beta: `https://api.mainnet-beta.solana.com` + +- Tokens that are issued on Mainnet Beta are **real** SOL +- If you have paid money to purchase/be issued tokens, such as through our + CoinList auction, these tokens will be transferred on Mainnet Beta. + - Note: If you are using a non-command-line wallet such as + [Trust Wallet](wallet-guide/trust-wallet.md), + the wallet will always be connecting to Mainnet Beta. +- Gossip entrypoint for Mainnet Beta: `mainnet-beta.solana.com:8001` +- RPC URL for Mainnet Beta: `https://api.mainnet-beta.solana.com` ##### Example `solana` command-line configuration + ```bash solana config set --url https://api.mainnet-beta.solana.com ``` ##### Example `solana-validator` command-line + ```bash $ solana-validator \ --identity ~/validator-keypair.json \ diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css new file mode 100644 index 0000000000..65452e5272 --- /dev/null +++ b/docs/src/css/custom.css @@ -0,0 +1,69 @@ +/* stylelint-disable docusaurus/copyright-header */ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. 
*/ + +@import url('https://fonts.googleapis.com/css2?family=Roboto'); + +:root { + --ifm-color-primary: #25c2a0; + --ifm-color-primary-dark: #409088; + --ifm-color-primary-darker: #387462; + --ifm-color-primary-darkest: #1b4e3f; + --ifm-color-primary-light: #42ba96; + --ifm-color-primary-lighter: #86b8b6; + --ifm-color-primary-lightest: #abd5c6; + --ifm-code-font-size: 95%; + --ifm-spacing-horizontal: 1em; + --ifm-font-family-base: "Roboto", system-ui, -apple-system, Segoe UI, Roboto, Ubuntu, Cantarell, Noto Sans, sans-serif, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + --ifm-footer-background-color: #232323; +} + + + + +@keyframes fadeInUp { + 0% { opacity: 0; transform: translateY(1.5rem); } +} + +main { + margin: 1rem 0 5rem 0; +} + +.docusaurus-highlight-code-line { + background-color: rgb(72, 77, 91); + display: block; + margin: 0 calc(-1 * var(--ifm-pre-padding)); + padding: 0 var(--ifm-pre-padding); +} + +.card { + padding: 1rem; + margin-top: 2rem; + animation: fadeInUp 400ms backwards; + animation-delay: 150ms; + transition-property: all; + transition-duration: 200ms; + box-shadow: 0 8px 28px 4px rgba(86,91,115,0.15); +} + + +.card a { + text-decoration: none; +} + +.card:hover { + transform: translate(0px, -5px); +} + +.footer--dark { + background-color: #232323 !important; +} + +footer .text--center { + padding: 2rem 0 0 0; +} \ No newline at end of file diff --git a/docs/src/file-system-wallet/README.md b/docs/src/file-system-wallet/README.md index 8d47f5cd86..27da2004fb 100644 --- a/docs/src/file-system-wallet/README.md +++ b/docs/src/file-system-wallet/README.md @@ -1,15 +1,15 @@ -# File System Wallet +--- +title: File System Wallet +--- This document describes how to create and use a file system wallet with the -Solana CLI tools. A file system wallet exists as an unencrypted keypair file +Solana CLI tools. A file system wallet exists as an unencrypted keypair file on your computer system's filesystem. -{% hint style="info" %} -File system wallets are the **least secure** method of storing SOL tokens. -Storing large amounts of tokens in a file system wallet is **not recommended**. -{% endhint %} +> File system wallets are the **least secure** method of storing SOL tokens. Storing large amounts of tokens in a file system wallet is **not recommended**. ## Before you Begin + Make sure you have [installed the Solana Command Line Tools](../cli/install-solana-cli-tools.md) @@ -40,8 +40,8 @@ ErRr1caKzK8L8nn4xmEWtimYRiTCAZXjBtVphuZ5vMKy ``` This is the public key corresponding to the keypair in -`~/my-solana-wallet/my-keypair.json`. The public key of the keypair file is -your *wallet address*. +`~/my-solana-wallet/my-keypair.json`. The public key of the keypair file is +your _wallet address_. ## Verify your Address against your Keypair file @@ -57,7 +57,8 @@ The command will output "Success" if the given address matches the the one in your keypair file, and "Failed" otherwise. ## Creating Multiple File System Wallet Addresses -You can create as many wallet addresses as you like. Simply re-run the + +You can create as many wallet addresses as you like. Simply re-run the steps in [Generate a File System Wallet](#generate-a-file-system-wallet-keypair) and make sure to use a new filename or path with the `--outfile` argument. 
Multiple wallet addresses can be useful if you want to transfer tokens between diff --git a/docs/src/hardware-wallets/README.md b/docs/src/hardware-wallets/README.md index ac18238b7f..94e3ef0e3d 100644 --- a/docs/src/hardware-wallets/README.md +++ b/docs/src/hardware-wallets/README.md @@ -1,22 +1,25 @@ -# Hardware Wallets +--- +title: Hardware Wallets +--- Signing a transaction requires a private key, but storing a private key on your personal computer or phone leaves it subject to theft. Adding a password to your key adds security, but many people prefer to take it a step further and move their private keys to a separate -physical device called a *hardware wallet*. A hardware wallet is a +physical device called a _hardware wallet_. A hardware wallet is a small handheld device that stores private keys and provides some interface for signing transactions. The Solana CLI has first class support for hardware wallets. Anywhere you use a keypair filepath (denoted as `` in usage docs), you -can pass a *keypair URL* that uniquely identifies a keypair in a +can pass a _keypair URL_ that uniquely identifies a keypair in a hardware wallet. ## Supported Hardware Wallets The Solana CLI supports the following hardware wallets: - - [Ledger Nano S](ledger.md) + +- [Ledger Nano S](ledger.md) ## Specify a Keypair URL @@ -44,7 +47,7 @@ usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/0 All derivation paths implicitly include the prefix `44'/501'`, which indicates the path follows the [BIP44 specifications](https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki) -and that any derived keys are Solana keys (Coin type 501). The single quote +and that any derived keys are Solana keys (Coin type 501). The single quote indicates a "hardened" derivation. Because Solana uses Ed25519 keypairs, all derivations are hardened and therefore adding the quote is optional and unnecessary. diff --git a/docs/src/hardware-wallets/ledger.md b/docs/src/hardware-wallets/ledger.md index f595d247fc..b9124e93ae 100644 --- a/docs/src/hardware-wallets/ledger.md +++ b/docs/src/hardware-wallets/ledger.md @@ -1,4 +1,6 @@ -# Ledger Hardware Wallet +--- +title: Ledger Hardware Wallet +--- The Ledger Nano S hardware wallet offers secure storage of your Solana private keys. The Solana Ledger app enables derivation of essentially infinite keys, and @@ -27,10 +29,10 @@ solana-keygen pubkey usb://ledger This confirms your Ledger device is connected properly and in the correct state to interact with the Solana CLI. The command returns your Ledger's unique -*wallet ID*. When you have multiple Nano S devices connected to the same +_wallet ID_. When you have multiple Nano S devices connected to the same computer, you can use your wallet ID to specify which Ledger hardware wallet -you want to use. If you only plan to use a single Nano S on your computer -at a time, you don't need to include the wallet ID. For information on +you want to use. If you only plan to use a single Nano S on your computer +at a time, you don't need to include the wallet ID. For information on using the wallet ID to use a specific Ledger, see [Manage Multiple Hardware Wallets](#manage-multiple-hardware-wallets). @@ -45,7 +47,7 @@ your own accounts for different purposes, or use different keypairs on the device as signing authorities for a stake account, for example. All of the following commands will display different addresses, associated with -the keypair path given. Try them out! +the keypair path given. Try them out! 
```bash solana-keygen pubkey usb://ledger @@ -62,8 +64,9 @@ Just make a note of which keypair URL you used to derive any address you will be using to receive tokens. If you are only planning to use a single address/keypair on your device, a good -easy-to-remember path might be to use the address at `key=0`. View this address +easy-to-remember path might be to use the address at `key=0`. View this address with: + ```bash solana-keygen pubkey usb://ledger?key=0 ``` @@ -76,12 +79,14 @@ associated keypair URL as the signer for transactions from that address. To view the balance of any account, regardless of which wallet it uses, use the `solana balance` command: + ```bash solana balance SOME_WALLET_ADDRESS ``` For example, if your address is `7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri`, then enter the following command to view the balance: + ```bash solana balance 7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri ``` @@ -91,15 +96,15 @@ You can also view the balance of any account address on the Accounts tab in the and paste the address in the box to view the balance in you web browser. Note: Any address with a balance of 0 SOL, such as a newly created one on your -Ledger, will show as "Not Found" in the explorer. Empty accounts and non-existent -accounts are treated the same in Solana. This will change when your account +Ledger, will show as "Not Found" in the explorer. Empty accounts and non-existent +accounts are treated the same in Solana. This will change when your account address has some SOL in it. ### Send SOL from a Ledger Nano S To send some tokens from an address controlled by your Nano S device, you will need to use the device to sign a transaction, using the same keypair URL you -used to derive the address. To do this, make sure your Nano S is plugged in, +used to derive the address. To do this, make sure your Nano S is plugged in, unlocked with the PIN, Ledger Live is not running, and the Solana App is open on the device, showing "Application is Ready". @@ -112,12 +117,12 @@ from the associated address will decrease. solana transfer RECIPIENT_ADDRESS AMOUNT --keypair KEYPAIR_URL_OF_SENDER ``` -Below is a full example. First, an address is viewed at a certain keypair URL. -Second, the balance of tht address is checked. Lastly, a transfer transaction +Below is a full example. First, an address is viewed at a certain keypair URL. +Second, the balance of tht address is checked. Lastly, a transfer transaction is entered to send `1` SOL to the recipient address `7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri`. When you hit Enter for a transfer command, you will be prompted to approve the -transaction details on your Ledger device. On the device, use the right and -left buttons to review the transaction details. If they look correct, click +transaction details on your Ledger device. On the device, use the right and +left buttons to review the transaction details. If they look correct, click both buttons on the "Approve" screen, otherwise push both buttons on the "Reject" screen. @@ -137,8 +142,8 @@ Signature: kemu9jDEuPirKNRKiHan7ycybYsZp7pFefAdvWZRq5VRHCLgXTXaFVw3pfh87MQcWX4kQ After approving the transaction on your device, the program will display the transaction signature, and wait for the maximum number of confirmations (32) -before returning. This only takes a few seconds, and then the transaction is -finalized on the Solana network. You can view details of this or any other +before returning. This only takes a few seconds, and then the transaction is +finalized on the Solana network. 
You can view details of this or any other transaction by going to the Transaction tab in the [Explorer](https://explorer.solana.com/transactions) and paste in the transaction signature. @@ -148,7 +153,7 @@ and paste in the transaction signature. ### Manage Multiple Hardware Wallets It is sometimes useful to sign a transaction with keys from multiple hardware -wallets. Signing with multiple wallets requires *fully qualified keypair URLs*. +wallets. Signing with multiple wallets requires _fully qualified keypair URLs_. When the URL is not fully qualified, the Solana CLI will prompt you with the fully qualified URLs of all connected hardware wallets, and ask you to choose which wallet to use for each signature. @@ -183,7 +188,7 @@ on one of the public testnets. You can use the command-line to install the latest Solana Ledger app release before it has been validated by -the Ledger team and made available via Ledger Live. Note that because the app +the Ledger team and made available via Ledger Live. Note that because the app is not installed via Ledger Live, you will need to approve installation from an "unsafe" manager, as well as see the message, "This app is not genuine" each time you open the app. Once the app is available on Ledger Live, you can @@ -262,9 +267,6 @@ solana-keygen pubkey usb://ledger\?key=0 Check out our [Wallet Support Page](../wallet-guide/support.md) for ways to get help. - - - Read more about [sending and receiving tokens](../cli/transfer-tokens.md) and [delegating stake](../cli/delegate-stake.md). You can use your Ledger keypair URL -anywhere you see an option or argument that accepts a ``. \ No newline at end of file +anywhere you see an option or argument that accepts a ``. diff --git a/docs/src/history.md b/docs/src/history.md index d83ad5c182..a164996acd 100644 --- a/docs/src/history.md +++ b/docs/src/history.md @@ -1,4 +1,6 @@ -# History of the Solana Codebase +--- +title: History of the Solana Codebase +--- In November of 2017, Anatoly Yakovenko published a whitepaper describing Proof of History, a technique for keeping time between computers that do not trust diff --git a/docs/src/implemented-proposals/README.md b/docs/src/implemented-proposals/README.md index f47169fea0..839697a6e7 100644 --- a/docs/src/implemented-proposals/README.md +++ b/docs/src/implemented-proposals/README.md @@ -1,4 +1,5 @@ -# Implemented Design Proposals +--- +title: Implemented Design Proposals +--- The following design proposals are fully implemented. - diff --git a/docs/src/implemented-proposals/abi-management.md b/docs/src/implemented-proposals/abi-management.md index 6cbd1fd1b9..f4b143db19 100644 --- a/docs/src/implemented-proposals/abi-management.md +++ b/docs/src/implemented-proposals/abi-management.md @@ -1,4 +1,6 @@ -# Solana ABI management process +--- +title: Solana ABI management process +--- This document proposes the Solana ABI management process. The ABI management process is an engineering practice and a supporting technical framework to avoid @@ -109,7 +111,7 @@ This part is a bit complex. There is three inter-depending parts: `AbiExample`, First, the generated test creates an example instance of the digested type with a trait called `AbiExample`, which should be implemented for all of digested types like the `Serialize` and return `Self` like the `Default` trait. Usually, -it's provided via generic trait specialization for most of common types. Also +it's provided via generic trait specialization for most of common types. 
Also it is possible to `derive` for `struct` and `enum` and can be hand-written if needed. diff --git a/docs/src/implemented-proposals/commitment.md b/docs/src/implemented-proposals/commitment.md index 5a7816b10b..a404aded2f 100644 --- a/docs/src/implemented-proposals/commitment.md +++ b/docs/src/implemented-proposals/commitment.md @@ -1,4 +1,6 @@ -# Commitment +--- +title: Commitment +--- The commitment metric aims to give clients a measure of the network confirmation and stake levels on a particular block. Clients can then use this information to @@ -47,9 +49,10 @@ banks are not included in the commitment calculations here. Now we can naturally augment the above computation to also build a `BlockCommitment` array for every bank `b` by: -1) Adding a `ForkCommitmentCache` to collect the `BlockCommitment` structs -2) Replacing `f` with `f'` such that the above computation also builds this -`BlockCommitment` for every bank `b`. + +1. Adding a `ForkCommitmentCache` to collect the `BlockCommitment` structs +2. Replacing `f` with `f'` such that the above computation also builds this + `BlockCommitment` for every bank `b`. We will proceed with the details of 2) as 1) is trivial. @@ -75,6 +78,7 @@ Now more specifically, we augment the above computation to: ``` where `f'` is defined as: + ```text fn f`( stake: &mut Stake, diff --git a/docs/src/implemented-proposals/cross-program-invocation.md b/docs/src/implemented-proposals/cross-program-invocation.md index 8c899221b7..2fed17292f 100644 --- a/docs/src/implemented-proposals/cross-program-invocation.md +++ b/docs/src/implemented-proposals/cross-program-invocation.md @@ -1,4 +1,6 @@ -# Cross-Program Invocation +--- +title: Cross-Program Invocation +--- ## Problem @@ -67,13 +69,13 @@ mod acme { `invoke()` is built into Solana's runtime and is responsible for routing the given instruction to the `token` program via the instruction's `program_id` field. -Before invoking `pay()`, the runtime must ensure that `acme` didn't modify any accounts owned by `token`. It does this by applying the runtime's policy to the current state of the accounts at the time `acme` calls `invoke` vs. the initial state of the accounts at the beginning of the `acme`'s instruction. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme` by again applying the runtime's policy, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must apply the runtime policy one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as whole made no invalid account changes, and therefore commit all these account modifications. +Before invoking `pay()`, the runtime must ensure that `acme` didn't modify any accounts owned by `token`. It does this by applying the runtime's policy to the current state of the accounts at the time `acme` calls `invoke` vs. the initial state of the accounts at the beginning of the `acme`'s instruction. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme` by again applying the runtime's policy, but this time with the `token` program ID. 
Lastly, after `pay_and_launch_missiles()` completes, the runtime must apply the runtime policy one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as whole made no invalid account changes, and therefore commit all these account modifications. ### Instructions that require privileges -The runtime uses the privileges granted to the caller program to determine what privileges can be extended to the callee. Privileges in this context refer to signers and writable accounts. For example, if the instruction the caller is processing contains a signer or writable account, then the caller can invoke an instruction that also contains that signer and/or writable account. +The runtime uses the privileges granted to the caller program to determine what privileges can be extended to the callee. Privileges in this context refer to signers and writable accounts. For example, if the instruction the caller is processing contains a signer or writable account, then the caller can invoke an instruction that also contains that signer and/or writable account. -This privilege extension relies on the fact that programs are immutable. In the case of the `acme` program, the runtime can safely treat the transaction's signature as a signature of a `token` instruction. When the runtime sees the `token` instruction references `alice_pubkey`, it looks up the key in the `acme` instruction to see if that key corresponds to a signed account. In this case, it does and thereby authorizes the `token` program to modify Alice's account. +This privilege extension relies on the fact that programs are immutable. In the case of the `acme` program, the runtime can safely treat the transaction's signature as a signature of a `token` instruction. When the runtime sees the `token` instruction references `alice_pubkey`, it looks up the key in the `acme` instruction to see if that key corresponds to a signed account. In this case, it does and thereby authorizes the `token` program to modify Alice's account. ### Program signed accounts @@ -86,11 +88,11 @@ To sign an account with program derived addresses, a program may `invoke_signed( invoke_signed( &instruction, accounts, - &[&["First addresses seed"], + &[&["First addresses seed"], &["Second addresses first seed", "Second addresses second seed"]], )?; ``` ### Reentrancy -Reentrancy is currently limited to direct self recursion capped at a fixed depth. This restriction prevents situations where a program might invoke another from an intermediary state without the knowledge that it might later be called back into. Direct recursion gives the program full control of its state at the point that it gets called back. +Reentrancy is currently limited to direct self recursion capped at a fixed depth. This restriction prevents situations where a program might invoke another from an intermediary state without the knowledge that it might later be called back into. Direct recursion gives the program full control of its state at the point that it gets called back. 
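As a rough illustration of the invocation pattern described above, the sketch below shows how a caller such as the hypothetical `acme` program might build an instruction and forward it, along with the accounts it received, so the runtime can extend the transaction's signatures to the callee. It assumes the `solana_program` crate's `invoke` helper (the host API may differ by version) and uses a made-up instruction payload; it is not the actual token program interface.

```rust
use solana_program::{
    account_info::AccountInfo,
    entrypoint::ProgramResult,
    instruction::{AccountMeta, Instruction},
    program::invoke,
    pubkey::Pubkey,
};

/// Hypothetical `pay()` helper: forwards a transfer instruction to the callee
/// program. Because Alice's account signed the outer `acme` instruction, the
/// runtime extends her signature to this inner instruction.
fn pay<'a>(
    callee_program_id: &Pubkey,
    alice: &AccountInfo<'a>,
    bob: &AccountInfo<'a>,
    callee_program: &AccountInfo<'a>,
    amount: u64,
) -> ProgramResult {
    let instruction = Instruction {
        program_id: *callee_program_id,
        accounts: vec![
            AccountMeta::new(*alice.key, true), // writable + signer, privilege passed through
            AccountMeta::new(*bob.key, false),  // writable
        ],
        // Placeholder payload; a real callee defines its own instruction encoding.
        data: amount.to_le_bytes().to_vec(),
    };
    // Every account the instruction references (plus the callee program account)
    // must be passed along so the runtime can apply its policy to them.
    invoke(&instruction, &[alice.clone(), bob.clone(), callee_program.clone()])
}
```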
diff --git a/docs/src/implemented-proposals/durable-tx-nonces.md b/docs/src/implemented-proposals/durable-tx-nonces.md index 43d470e6f8..72aa701f39 100644 --- a/docs/src/implemented-proposals/durable-tx-nonces.md +++ b/docs/src/implemented-proposals/durable-tx-nonces.md @@ -1,4 +1,6 @@ -# Durable Transaction Nonces +--- +title: Durable Transaction Nonces +--- ## Problem @@ -11,8 +13,8 @@ offline network participants. ## Requirements -1) The transaction's signature needs to cover the nonce value -2) The nonce must not be reusable, even in the case of signing key disclosure +1. The transaction's signature needs to cover the nonce value +2. The nonce must not be reusable, even in the case of signing key disclosure ## A Contract-based Solution @@ -25,8 +27,8 @@ When making use of a durable nonce, the client must first query its value from account data. A transaction is now constructed in the normal way, but with the following additional requirements: - 1) The durable nonce value is used in the `recent_blockhash` field - 2) An `AdvanceNonceAccount` instruction is the first issued in the transaction +1. The durable nonce value is used in the `recent_blockhash` field +2. An `AdvanceNonceAccount` instruction is the first issued in the transaction ### Contract Mechanics @@ -63,7 +65,7 @@ WithdrawInstruction(to, lamports) success ``` -A client wishing to use this feature starts by creating a nonce account under +A client wishing to use this feature starts by creating a nonce account under the system program. This account will be in the `Uninitialized` state with no stored hash, and thus unusable. @@ -95,11 +97,7 @@ can be changed using the `AuthorizeNonceAccount` instruction. It takes one param the `Pubkey` of the new authority. Executing this instruction grants full control over the account and its balance to the new authority. -{% hint style="info" %} -`AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current -[nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the -account to sign the transaction. -{% endhint %} +> `AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current [nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the account to sign the transaction. ### Runtime Support @@ -114,11 +112,11 @@ instruction as the first instruction in the transaction. If the runtime determines that a Durable Transaction Nonce is in use, it will take the following additional actions to validate the transaction: - 1) The `NonceAccount` specified in the `Nonce` instruction is loaded. - 2) The `NonceState` is deserialized from the `NonceAccount`'s data field and -confirmed to be in the `Initialized` state. - 3) The nonce value stored in the `NonceAccount` is tested to match against the -one specified in the transaction's `recent_blockhash` field. +1. The `NonceAccount` specified in the `Nonce` instruction is loaded. +2. The `NonceState` is deserialized from the `NonceAccount`'s data field and + confirmed to be in the `Initialized` state. +3. The nonce value stored in the `NonceAccount` is tested to match against the + one specified in the transaction's `recent_blockhash` field. If all three of the above checks succeed, the transaction is allowed to continue validation. 
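The three runtime checks listed above can be summarized in a small, self-contained sketch. The types here are simplified stand-ins, not the runtime's actual nonce-state definitions; the point is only the order and effect of the checks.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Hash([u8; 32]);

enum NonceState {
    Uninitialized,
    Initialized { nonce: Hash },
}

/// Stand-in for the NonceAccount loaded in step 1.
struct NonceAccount {
    state: NonceState,
}

fn validate_durable_nonce(
    nonce_account: &NonceAccount, // 1. the NonceAccount named by the Nonce instruction
    recent_blockhash: &Hash,      //    the transaction's recent_blockhash field
) -> Result<(), &'static str> {
    // 2. The deserialized state must be Initialized.
    let stored = match &nonce_account.state {
        NonceState::Initialized { nonce } => nonce,
        NonceState::Uninitialized => return Err("nonce account not initialized"),
    };
    // 3. The stored nonce must match the transaction's recent_blockhash.
    if stored != recent_blockhash {
        return Err("stored nonce does not match recent_blockhash");
    }
    Ok(())
}

fn main() {
    let nonce = Hash([7; 32]);
    let account = NonceAccount {
        state: NonceState::Initialized { nonce },
    };
    assert!(validate_durable_nonce(&account, &nonce).is_ok());
    assert!(validate_durable_nonce(&account, &Hash([8; 32])).is_err());
}
```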
diff --git a/docs/src/implemented-proposals/ed_overview/README.md b/docs/src/implemented-proposals/ed_overview/README.md index d792e7669a..e94bfe0eaa 100644 --- a/docs/src/implemented-proposals/ed_overview/README.md +++ b/docs/src/implemented-proposals/ed_overview/README.md @@ -1,4 +1,6 @@ -# Cluster Economics +--- +title: Cluster Economics +--- **Subject to change.** @@ -12,6 +14,6 @@ Transaction fees are market-based participant-to-participant transfers, attached A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/README.md), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md). Also, the section titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. -![](../../.gitbook/assets/economic_design_infl_230719.png) +![](/img/economic_design_infl_230719.png) **Figure 1**: Schematic overview of Solana economic incentive design. diff --git a/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md b/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md index 329ab6957f..6e407be262 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md +++ b/docs/src/implemented-proposals/ed_overview/ed_economic_sustainability.md @@ -1,4 +1,6 @@ -# Economic Sustainability +--- +title: Economic Sustainability +--- **Subject to change.** diff --git a/docs/src/implemented-proposals/ed_overview/ed_mvp.md b/docs/src/implemented-proposals/ed_overview/ed_mvp.md index a57b3b42e6..0858c2a27d 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_mvp.md +++ b/docs/src/implemented-proposals/ed_overview/ed_mvp.md @@ -1,4 +1,6 @@ -# Economic Design MVP +--- +title: Economic Design MVP +--- **Subject to change.** @@ -6,7 +8,7 @@ The preceding sections, outlined in the [Economic Design Overview](../README.md) ## MVP Economic Features -* Faucet to deliver testnet SOLs to validators for staking and application development. -* Mechanism by which validators are rewarded via network inflation. -* Ability to delegate tokens to validator nodes -* Validator set commission fees on interest from delegated tokens. +- Faucet to deliver testnet SOLs to validators for staking and application development. +- Mechanism by which validators are rewarded via network inflation. +- Ability to delegate tokens to validator nodes +- Validator set commission fees on interest from delegated tokens. diff --git a/docs/src/implemented-proposals/ed_overview/ed_references.md b/docs/src/implemented-proposals/ed_overview/ed_references.md index 41b46a45a1..4c9047ae38 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_references.md +++ b/docs/src/implemented-proposals/ed_overview/ed_references.md @@ -1,6 +1,7 @@ -# References +--- +title: References +--- 1. 
[https://blog.ethereum.org/2016/07/27/inflation-transaction-fees-cryptocurrency-monetary-policy/](https://blog.ethereum.org/2016/07/27/inflation-transaction-fees-cryptocurrency-monetary-policy/) 2. [https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281](https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281) 3. [https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281](https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281) - diff --git a/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md b/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md index 56eab1a3be..f19246a273 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md +++ b/docs/src/implemented-proposals/ed_overview/ed_storage_rent_economics.md @@ -1,4 +1,6 @@ -## Storage Rent Economics +--- +title: Storage Rent Economics +--- Each transaction that is submitted to the Solana ledger imposes costs. Transaction fees paid by the submitter, and collected by a validator, in theory, account for the acute, transactional, costs of validating and adding that data to the ledger. Unaccounted in this process is the mid-term storage of active ledger state, necessarily maintained by the rotating validator set. This type of storage imposes costs not only to validators but also to the broader network as active state grows so does data transmission and validation overhead. To account for these costs, we describe here our preliminary design and implementation of storage rent. @@ -13,6 +15,3 @@ Method 2: Pay per byte If an account has less than two-years worth of deposited rent the network charges rent on a per-epoch basis, in credit for the next epoch. This rent is deducted at a rate specified in genesis, in lamports per kilobyte-year. For information on the technical implementation details of this design, see the [Rent](../rent.md) section. - - - diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md index b371a0c3fd..db4123b8db 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/README.md @@ -1,8 +1,9 @@ -# Validation-client Economics +--- +title: Validation-client Economics +--- **Subject to change.** Validator-clients are eligible to receive protocol-based \(i.e. inflation-based\) rewards issued via stake-based annual interest rates \(calculated per epoch\) by providing compute \(CPU+GPU\) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of total amount of circulating tokens. The network is expected to launch with an annual inflation rate around 15%, set to decrease by 15% per year until a long-term stable rate of 1-2% is reached. These issuances are to be split and distributed to participating validators, with around 90% of the issued tokens allocated for validator rewards. 
Because the network will be distributing a fixed amount of inflation rewards across the stake-weighted validator set, any individual validator's interest rate will be a function of the amount of staked SOL in relation to the circulating SOL. Additionally, validator clients may earn revenue through fees via state-validation transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards and state-validation transaction fees and rent. - diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md index 0e4e9835bc..72c233d132 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md @@ -1,33 +1,35 @@ -# State-validation Protocol-based Rewards +--- +title: State-validation Protocol-based Rewards +--- **Subject to change.** Validator-clients have two functional roles in the Solana network: -* Validate \(vote\) the current global state of that PoH. -* Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity. +- Validate \(vote\) the current global state of that PoH. +- Be elected as ‘leader’ on a stake-weighted round-robin schedule during which time they are responsible for collecting outstanding transactions and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity. Validator-client rewards for these services are to be distributed at the end of each Solana epoch. As previously discussed, compensation for validator-clients is provided via a protocol-based annual inflation rate dispersed in proportion to the stake-weight of each validator \(see below\) along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed \(see [Validation-client State Transaction Fees](ed_vce_state_validation_transaction_fees.md)\). The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of: -* the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](README.md)\) -* the fraction of staked SOLs out of the current total circulating supply, -* the up-time/participation \[% of available slots that validator had opportunity to vote on\] of a given validator over the previous epoch. +- the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](README.md)\) +- the fraction of staked SOLs out of the current total circulating supply, +- the up-time/participation \[% of available slots that validator had opportunity to vote on\] of a given validator over the previous epoch. The first factor is a function of protocol parameters only \(i.e. 
independent of validator behavior in a given epoch\) and results in a global validation reward schedule designed to incentivize early participation, provide clear monetary stability and provide optimal security in the network. At any given point in time, a specific validator's interest rate can be determined based on the proportion of circulating supply that is staked by the network and the validator's uptime/activity in the previous epoch. For example, consider a hypothetical instance of the network with an initial circulating token supply of 250MM tokens with an additional 250MM vesting over 3 years. Additionally an inflation rate is specified at network launch of 7.5%, and a disinflationary schedule of 20% decrease in inflation rate per year \(the actual rates to be implemented are to be worked out during the testnet experimentation phase of mainnet launch\). With these broad assumptions, the 10-year inflation rate \(adjusted daily for this example\) is shown in **Figure 1**, while the total circulating token supply is illustrated in **Figure 2**. Neglected in this toy-model is the inflation suppression due to the portion of each transaction fee that is to be destroyed. -![](../../../.gitbook/assets/p_ex_schedule.png) +![](/img/p_ex_schedule.png) **Figure 1:** In this example schedule, the annual inflation rate \[%\] reduces at around 20% per year, until it reaches the long-term, fixed, 1.5% rate. -![](../../../.gitbook/assets/p_ex_supply.png) +![](/img/p_ex_supply.png) **Figure 2:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary inflation schedule as shown in **Figure 1**. Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2% which also results in a fixed, long-term, interest rate to be provided to validator-clients. This value does not represent the total interest available to validator-clients as transaction fees for state-validation are not accounted for here. Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule as a function of % circulating token supply that is staked is shown in **Figure 3**. -![](../../../.gitbook/assets/p_ex_interest.png) +![](/img/p_ex_interest.png) **Figure 3:** Shown here are example validator interest rates over time, neglecting transaction fees, segmented by fraction of total circulating supply bonded as stake. 
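The example schedule behind Figures 1-3 can be reproduced with a few lines of arithmetic. The sketch below is illustrative only: it hard-codes this section's toy assumptions (7.5% initial inflation, 20% yearly disinflation, a 1.5% long-term floor, 100% uptime, and all issuance flowing to stake, ignoring the 80%/20% split mentioned above) and is not part of any proposal code.

```rust
// Toy model of the example disinflationary schedule; values come from the
// example in this section, everything else is simplified for illustration.
fn inflation_rate(year: u32) -> f64 {
    let initial = 0.075;     // 7.5% at launch (example value)
    let disinflation = 0.20; // 20% reduction per year (example value)
    let long_term = 0.015;   // 1.5% long-term floor (example value)
    (initial * (1.0_f64 - disinflation).powi(year as i32)).max(long_term)
}

// Naive validator rate at 100% uptime: all issuance is assumed to flow to
// stake, so the rate scales inversely with the staked fraction of supply.
fn validator_rate(year: u32, staked_fraction: f64) -> f64 {
    inflation_rate(year) / staked_fraction
}

fn main() {
    for year in 0..10 {
        println!(
            "year {:2}: inflation {:.2}%, validator rate at 50% staked {:.2}%",
            year,
            inflation_rate(year) * 100.0,
            validator_rate(year, 0.5) * 100.0
        );
    }
}
```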
diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md index 0024519b82..a32032cae1 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md @@ -1,13 +1,15 @@ -# State-validation Transaction Fees +--- +title: State-validation Transaction Fees +--- **Subject to change.** Each transaction sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, must contain a transaction fee. Transaction fees offer many benefits in the Solana economic design, for example they: -* provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction, -* reduce network spam by introducing real cost to transactions, -* open avenues for a transaction market to incentivize validation-client to collect and process submitted transactions in their function as leader, -* and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below. +- provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction, +- reduce network spam by introducing real cost to transactions, +- open avenues for a transaction market to incentivize validation-client to collect and process submitted transactions in their function as leader, +- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below. Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above. diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md index 186440613c..2ea3d3120c 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_validation_stake_delegation.md @@ -1,27 +1,28 @@ -# Validation Stake Delegation +--- +title: Validation Stake Delegation +--- **Subject to change.** Running a Solana validation-client required relatively modest upfront hardware capital investment. 
**Table 2** provides an example hardware configuration to support ~1M tx/s with estimated ‘off-the-shelf’ costs: -| Component | Example | Estimated Cost | -| :--- | :--- | :--- | -| GPU | 2x 2080 Ti | $2500 | -| or | 4x 1080 Ti | $2800 | -| OS/Ledger Storage | Samsung 860 Evo 2TB | $370 | -| Accounts storage | 2x Samsung 970 Pro M.2 512GB | $340 | -| RAM | 32 Gb | $300 | -| Motherboard | AMD x399 | $400 | -| CPU | AMD Threadripper 2920x | $650 | -| Case | | $100 | -| Power supply | EVGA 1600W | $300 | -| Network | > 500 mbps | | -| Network \(1\) | Google webpass business bay area 1gbps unlimited | $5500/mo | -| Network \(2\) | Hurricane Electric bay area colo 1gbps | $500/mo | +| Component | Example | Estimated Cost | +| :---------------- | :----------------------------------------------- | :------------- | +| GPU | 2x 2080 Ti | \$2500 | +| or | 4x 1080 Ti | \$2800 | +| OS/Ledger Storage | Samsung 860 Evo 2TB | \$370 | +| Accounts storage | 2x Samsung 970 Pro M.2 512GB | \$340 | +| RAM | 32 Gb | \$300 | +| Motherboard | AMD x399 | \$400 | +| CPU | AMD Threadripper 2920x | \$650 | +| Case | | \$100 | +| Power supply | EVGA 1600W | \$300 | +| Network | > 500 mbps | | +| Network \(1\) | Google webpass business bay area 1gbps unlimited | \$5500/mo | +| Network \(2\) | Hurricane Electric bay area colo 1gbps | \$500/mo | **Table 2** example high-end hardware setup for running a Solana client. Despite the low-barrier to entry as a validation-client, from a capital investment perspective, as in any developing economy, there will be much opportunity and need for trusted validation services as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a true decentralized, permissionless network, these interested parties can become involved in the Solana network/economy via delegation of previously acquired tokens with a reliable validation node to earn a portion of the interest generated. Delegation of tokens to validation-clients provides a way for passive Solana token holders to become part of the active Solana economy and earn interest rates proportional to the interest rate generated by the delegated validation-client. Additionally, this feature intends to create a healthy validation-client market, with potential validation-client nodes competing to build reliable, transparent and profitable delegation services. 
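As a purely hypothetical illustration of "interest rates proportional to the interest rate generated by the delegated validation-client", combined with the commission fee mentioned in the Economic Design MVP section, a delegator's effective yield might be modeled as below. The struct and field names are invented for this example and are not an existing Solana API.

```rust
// Hypothetical types for illustration only.
struct Delegation {
    staked_sol: f64,
    validator_annual_rate: f64, // e.g. 0.12 for a 12% validator rate
    validator_commission: f64,  // e.g. 0.08 for an 8% commission
}

impl Delegation {
    // Delegator yield is proportional to the validator's rate, less commission.
    fn annual_interest_sol(&self) -> f64 {
        self.staked_sol * self.validator_annual_rate * (1.0 - self.validator_commission)
    }
}

fn main() {
    let d = Delegation {
        staked_sol: 1_000.0,
        validator_annual_rate: 0.12,
        validator_commission: 0.08,
    };
    println!("delegator earns ~{:.1} SOL per year", d.annual_interest_sol()); // ~110.4
}
```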
- diff --git a/docs/src/implemented-proposals/embedding-move.md b/docs/src/implemented-proposals/embedding-move.md index 15f2e4e920..f8fb917612 100644 --- a/docs/src/implemented-proposals/embedding-move.md +++ b/docs/src/implemented-proposals/embedding-move.md @@ -1,4 +1,6 @@ -# Embedding the Move Langauge +--- +title: Embedding the Move Language +--- ## Problem @@ -10,15 +12,15 @@ The biggest design difference between Solana's runtime and Libra's Move VM is ho This proposal attempts to define a way to embed the Move VM such that: -* cross-module invocations within Move do not require the runtime's +- cross-module invocations within Move do not require the runtime's cross-program runtime checks -* Move programs can leverage functionality in other Solana programs and vice +- Move programs can leverage functionality in other Solana programs and vice versa -* Solana's runtime parallelism is exposed to batches of Move and non-Move +- Solana's runtime parallelism is exposed to batches of Move and non-Move transactions @@ -33,4 +35,3 @@ All data accounts owned by Move modules must set their owners to the loader, `MO ### Interacting with Solana programs To invoke instructions in non-Move programs, Solana would need to extend the Move VM with a `process_instruction()` system call. It would work the same as `process_instruction()` Rust BPF programs. - diff --git a/docs/src/implemented-proposals/installer.md b/docs/src/implemented-proposals/installer.md index e889b2a560..d3f7743820 100644 --- a/docs/src/implemented-proposals/installer.md +++ b/docs/src/implemented-proposals/installer.md @@ -1,4 +1,6 @@ -# Cluster Software Installation and Updates +--- +title: Cluster Software Installation and Updates +--- Currently users are required to build the solana cluster software themselves from the git repository and manually update it, which is error prone and inconvenient. @@ -93,11 +95,11 @@ To guard against rollback attacks, `solana-install` will refuse to install an up A release archive is expected to be a tar file compressed with bzip2 with the following internal structure: -* `/version.yml` - a simple YAML file containing the field `"target"` - the +- `/version.yml` - a simple YAML file containing the field `"target"` - the target tuple. Any additional fields are ignored. -* `/bin/` -- directory containing available programs in the release. +- `/bin/` -- directory containing available programs in the release. `solana-install` will symlink this directory to @@ -105,7 +107,7 @@ A release archive is expected to be a tar file compressed with bzip2 with the fo variable. -* `...` -- any additional files and directories are permitted +- `...` -- any additional files and directories are permitted ## solana-install Tool @@ -113,9 +115,9 @@ The `solana-install` tool is used by the user to install and update their cluste It manages the following files and directories in the user's home directory: -* `~/.config/solana/install/config.yml` - user configuration and information about currently installed software version -* `~/.local/share/solana/install/bin` - a symlink to the current release. eg, `~/.local/share/solana-update/-/bin` -* `~/.local/share/solana/install/releases//` - contents of a release +- `~/.config/solana/install/config.yml` - user configuration and information about currently installed software version +- `~/.local/share/solana/install/bin` - a symlink to the current release.
eg, `~/.local/share/solana-update/-/bin` +- `~/.local/share/solana/install/releases//` - contents of a release ### Command-line Interface @@ -212,4 +214,3 @@ ARGS: The program will be restarted upon a successful software update ``` - diff --git a/docs/src/implemented-proposals/leader-leader-transition.md b/docs/src/implemented-proposals/leader-leader-transition.md index ee84fa3e86..f19036d92e 100644 --- a/docs/src/implemented-proposals/leader-leader-transition.md +++ b/docs/src/implemented-proposals/leader-leader-transition.md @@ -1,4 +1,6 @@ -# Leader-to-Leader Transition +--- +title: Leader-to-Leader Transition +--- This design describes how leaders transition production of the PoH ledger between each other as each leader generates its own slot. @@ -18,19 +20,19 @@ While a leader is actively receiving entries for the previous slot, the leader c The downsides: -* Leader delays its own slot, potentially allowing the next leader more time to +- Leader delays its own slot, potentially allowing the next leader more time to catch up. The upsides compared to guards: -* All the space in a block is used for entries. -* The timeout is not fixed. -* The timeout is local to the leader, and therefore can be clever. The leader's heuristic can take into account turbine performance. -* This design doesn't require a ledger hard fork to update. -* The previous leader can redundantly transmit the last entry in the block to the next leader, and the next leader can speculatively decide to trust it to generate its block without verification of the previous block. -* The leader can speculatively generate the last tick from the last received entry. -* The leader can speculatively process transactions and guess which ones are not going to be encoded by the previous leader. This is also a censorship attack vector. The current leader may withhold transactions that it receives from the clients so it can encode them into its own slot. Once processed, entries can be replayed into PoH quickly. +- All the space in a block is used for entries. +- The timeout is not fixed. +- The timeout is local to the leader, and therefore can be clever. The leader's heuristic can take into account turbine performance. +- This design doesn't require a ledger hard fork to update. +- The previous leader can redundantly transmit the last entry in the block to the next leader, and the next leader can speculatively decide to trust it to generate its block without verification of the previous block. +- The leader can speculatively generate the last tick from the last received entry. +- The leader can speculatively process transactions and guess which ones are not going to be encoded by the previous leader. This is also a censorship attack vector. The current leader may withhold transactions that it receives from the clients so it can encode them into its own slot. Once processed, entries can be replayed into PoH quickly. ## Alternative design options @@ -42,13 +44,12 @@ If the next leader receives the _penultimate tick_ before it produces its own _f The downsides: -* Every vote, and therefore confirmation, is delayed by a fixed timeout. 1 tick, or around 100ms. -* Average case confirmation time for a transaction would be at least 50ms worse. -* It is part of the ledger definition, so to change this behavior would require a hard fork. -* Not all the available space is used for entries. +- Every vote, and therefore confirmation, is delayed by a fixed timeout. 1 tick, or around 100ms. 
+- Average case confirmation time for a transaction would be at least 50ms worse. +- It is part of the ledger definition, so to change this behavior would require a hard fork. +- Not all the available space is used for entries. The upsides compared to leader timeout: -* The next leader has received all the previous entries, so it can start processing transactions without recording them into PoH. -* The previous leader can redundantly transmit the last entry containing the _penultimate tick_ to the next leader. The next leader can speculatively generate the _last tick_ as soon as it receives the _penultimate tick_, even before verifying it. - +- The next leader has received all the previous entries, so it can start processing transactions without recording them into PoH. +- The previous leader can redundantly transmit the last entry containing the _penultimate tick_ to the next leader. The next leader can speculatively generate the _last tick_ as soon as it receives the _penultimate tick_, even before verifying it. diff --git a/docs/src/implemented-proposals/leader-validator-transition.md b/docs/src/implemented-proposals/leader-validator-transition.md index 49a9641c02..3793cb890b 100644 --- a/docs/src/implemented-proposals/leader-validator-transition.md +++ b/docs/src/implemented-proposals/leader-validator-transition.md @@ -1,4 +1,6 @@ -# Leader-to-Validator Transition +--- +title: Leader-to-Validator Transition +--- A validator typically spends its time validating blocks. If, however, a staker delegates its stake to a validator, it will occasionally be selected as a _slot leader_. As a slot leader, the validator is responsible for producing blocks during an assigned _slot_. A slot has a duration of some number of preconfigured _ticks_. The duration of those ticks are estimated with a _PoH Recorder_ described later in this document. @@ -48,4 +50,3 @@ The loop is synchronized to PoH and does a synchronous start and stop of the slo the TVU may resume voting. 5. Goto 1. - diff --git a/docs/src/implemented-proposals/persistent-account-storage.md b/docs/src/implemented-proposals/persistent-account-storage.md index 0a9be436f6..b41765e903 100644 --- a/docs/src/implemented-proposals/persistent-account-storage.md +++ b/docs/src/implemented-proposals/persistent-account-storage.md @@ -1,4 +1,6 @@ -# Persistent Account Storage +--- +title: Persistent Account Storage +--- ## Persistent Account Storage @@ -49,9 +51,9 @@ An account can be _garbage-collected_ when squashing makes it unreachable. Three possible options exist: -* Maintain a HashSet of root forks. One is expected to be created every second. The entire tree can be garbage-collected later. Alternatively, if every fork keeps a reference count of accounts, garbage collection could occur any time an index location is updated. -* Remove any pruned forks from the index. Any remaining forks lower in number than the root are can be considered root. -* Scan the index, migrate any old roots into the new one. Any remaining forks lower than the new root can be deleted later. +- Maintain a HashSet of root forks. One is expected to be created every second. The entire tree can be garbage-collected later. Alternatively, if every fork keeps a reference count of accounts, garbage collection could occur any time an index location is updated. +- Remove any pruned forks from the index. Any remaining forks lower in number than the root are can be considered root. +- Scan the index, migrate any old roots into the new one. 
Any remaining forks lower than the new root can be deleted later. ## Append-only Writes @@ -85,10 +87,9 @@ To snapshot, the underlying memory-mapped files in the AppendVec need to be flus ## Performance -* Append-only writes are fast. SSDs and NVMEs, as well as all the OS level kernel data structures, allow for appends to run as fast as PCI or NVMe bandwidth will allow \(2,700 MB/s\). -* Each replay and banking thread writes concurrently to its own AppendVec. -* Each AppendVec could potentially be hosted on a separate NVMe. -* Each replay and banking thread has concurrent read access to all the AppendVecs without blocking writes. -* Index requires an exclusive write lock for writes. Single-thread performance for HashMap updates is on the order of 10m per second. -* Banking and Replay stages should use 32 threads per NVMe. NVMes have optimal performance with 32 concurrent readers or writers. - +- Append-only writes are fast. SSDs and NVMEs, as well as all the OS level kernel data structures, allow for appends to run as fast as PCI or NVMe bandwidth will allow \(2,700 MB/s\). +- Each replay and banking thread writes concurrently to its own AppendVec. +- Each AppendVec could potentially be hosted on a separate NVMe. +- Each replay and banking thread has concurrent read access to all the AppendVecs without blocking writes. +- Index requires an exclusive write lock for writes. Single-thread performance for HashMap updates is on the order of 10m per second. +- Banking and Replay stages should use 32 threads per NVMe. NVMes have optimal performance with 32 concurrent readers or writers. diff --git a/docs/src/implemented-proposals/program-derived-addresses.md b/docs/src/implemented-proposals/program-derived-addresses.md index ba2caa8539..3e35f6ba46 100644 --- a/docs/src/implemented-proposals/program-derived-addresses.md +++ b/docs/src/implemented-proposals/program-derived-addresses.md @@ -1,4 +1,6 @@ -# Program Derived Addresses +--- +title: Program Derived Addresses +--- ## Problem @@ -7,14 +9,14 @@ other programs as defined in the [Cross-Program Invocations](cross-program-invoc design. The lack of programmatic signature generation limits the kinds of programs -that can be implemented in Solana. A program may be given the +that can be implemented in Solana. A program may be given the authority over an account and later want to transfer that authority to another. This is impossible today because the program cannot act as the signer in the transaction that gives authority. For example, if two users want to make a wager on the outcome of a game in Solana, they must each transfer their wager's assets to some intermediary that will honor -their agreement. Currently, there is no way to implement this intermediary +their agreement. Currently, there is no way to implement this intermediary as a program in Solana because the intermediary program cannot transfer the assets to the winner. @@ -22,24 +24,24 @@ This capability is necessary for many DeFi applications since they require assets to be transferred to an escrow agent until some event occurs that determines the new owner. -* Decentralized Exchanges that transfer assets between matching bid and -ask orders. +- Decentralized Exchanges that transfer assets between matching bid and + ask orders. -* Auctions that transfer assets to the winner. +- Auctions that transfer assets to the winner. -* Games or prediction markets that collect and redistribute prizes to -the winners. 
+- Games or prediction markets that collect and redistribute prizes to + the winners. ## Proposed Solution The key to the design is two-fold: 1. Allow programs to control specific addresses, called Program-Addresses, in such a way that no external -user can generate valid transactions with signatures for those -addresses. + user can generate valid transactions with signatures for those + addresses. 2. Allow programs to programmatically sign for Program-Addresses that are -present in instructions invoked via [Cross-Program Invocations](cross-program-invocation.md). + present in instructions invoked via [Cross-Program Invocations](cross-program-invocation.md). Given the two conditions, users can securely transfer or assign the authority of on-chain assets to Program-Addresses and the program @@ -48,13 +50,13 @@ can then assign that authority elsewhere at its discretion. ### Private keys for Program Addresses A Program -Address has no private key associated with it, and generating -a signature for it is impossible. While it has no private key of +a signature for it is impossible. While it has no private key of its own, it can issue an instruction that includes the Program-Address as a signer. ### Hash-based generated Program Addresses All 256-bit values are valid ed25519 curve points and valid ed25519 public -keys. All are equally secure and equally as hard to break. +keys. All are equally secure and equally as hard to break. Based on this assumption, Program Addresses can be deterministically derived from a base seed using a 256-bit preimage resistant hash function. @@ -81,7 +83,7 @@ pub fn create_address_with_seed( ``` Programs can deterministically derive any number of addresses by -using keywords. These keywords can symbolically identify how the addresses are used. +using keywords. These keywords can symbolically identify how the addresses are used. ```rust,ignore //! Generate a derived program address @@ -146,9 +148,9 @@ fn transfer_one_token_from_escrow( ### Instructions that require signers The addresses generated with `create_program_address` are indistinguishable -from any other public key. The only way for the runtime to verify that the +from any other public key. The only way for the runtime to verify that the address belongs to a program is for the program to supply the keywords used to generate the address. The runtime will internally call `create_program_address`, and compare the -result against the addresses supplied in the instruction. \ No newline at end of file +result against the addresses supplied in the instruction. diff --git a/docs/src/implemented-proposals/readonly-accounts.md b/docs/src/implemented-proposals/readonly-accounts.md index aee7749b96..0900a5dec0 100644 --- a/docs/src/implemented-proposals/readonly-accounts.md +++ b/docs/src/implemented-proposals/readonly-accounts.md @@ -1,4 +1,6 @@ -# Read-Only Accounts +--- +title: Read-Only Accounts +--- This design covers the handling of readonly and writable accounts in the [runtime](../validator/runtime.md). Multiple transactions that modify the same account must be processed serially so that they are always replayed in the same order. Otherwise, this could introduce non-determinism to the ledger. Some transactions, however, only need to read, and not modify, the data in particular accounts. Multiple transactions that only read the same account can be processed in parallel, since replay order does not matter, providing a performance benefit. 
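The parallelism rule stated above, where any number of readers may share an account but a writer needs exclusive access, can be pictured with a small greedy batcher. This is a sketch of the written rule with made-up types, not the runtime's actual account-lock implementation.

```rust
use std::collections::HashSet;

type Pubkey = [u8; 32];

struct Tx {
    writable: Vec<Pubkey>,
    readonly: Vec<Pubkey>,
}

/// Greedily select transactions that could replay in parallel: a writer needs
/// exclusive access to its accounts, while readers may share an account.
fn first_parallel_batch(txs: &[Tx]) -> Vec<usize> {
    let mut write_locked: HashSet<Pubkey> = HashSet::new();
    let mut read_locked: HashSet<Pubkey> = HashSet::new();
    let mut batch = Vec::new();
    for (i, tx) in txs.iter().enumerate() {
        let conflict = tx
            .writable
            .iter()
            .any(|k| write_locked.contains(k) || read_locked.contains(k))
            || tx.readonly.iter().any(|k| write_locked.contains(k));
        if !conflict {
            write_locked.extend(tx.writable.iter().copied());
            read_locked.extend(tx.readonly.iter().copied());
            batch.push(i);
        }
    }
    batch
}

fn main() {
    let (a, b) = ([1u8; 32], [2u8; 32]);
    let txs = vec![
        Tx { writable: vec![a], readonly: vec![] }, // takes a write lock on `a`
        Tx { writable: vec![], readonly: vec![a] }, // must wait: `a` is write-locked
        Tx { writable: vec![], readonly: vec![b] }, // only reads `b`, runs in parallel
    ];
    println!("parallel batch: {:?}", first_parallel_batch(&txs)); // [0, 2]
}
```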
@@ -10,7 +12,7 @@ Runtime transaction processing rules need to be updated slightly. Programs still Readonly accounts have the following property: -* Read-only access to all account fields, including lamports (cannot be credited or debited), and account data +- Read-only access to all account fields, including lamports (cannot be credited or debited), and account data Instructions that credit, debit, or modify the readonly account will fail. diff --git a/docs/src/implemented-proposals/reliable-vote-transmission.md b/docs/src/implemented-proposals/reliable-vote-transmission.md index feecd49953..e29c6c6fd2 100644 --- a/docs/src/implemented-proposals/reliable-vote-transmission.md +++ b/docs/src/implemented-proposals/reliable-vote-transmission.md @@ -1,4 +1,6 @@ -# Reliable Vote Transmission +--- +title: Reliable Vote Transmission +--- Validator votes are messages that have a critical function for consensus and continuous operation of the network. Therefore it is critical that they are reliably delivered and encoded into the ledger. @@ -56,4 +58,3 @@ Everything above plus the following: 4. Worst case 25mb memory overhead per node. 5. Sub 4 hops worst case to deliver to the entire network. 6. 80 shreds received by the leader for all the validator messages. - diff --git a/docs/src/implemented-proposals/rent.md b/docs/src/implemented-proposals/rent.md index 7eb035a8b9..4465e44c18 100644 --- a/docs/src/implemented-proposals/rent.md +++ b/docs/src/implemented-proposals/rent.md @@ -1,4 +1,6 @@ -# Rent +--- +title: Rent +--- Accounts on Solana may have owner-controlled state \(`Account::data`\) that's separate from the account's balance \(`Account::lamports`\). Since validators on the network need to maintain a working copy of this state in memory, the network charges a time-and-space based fee for this resource consumption, also known as Rent. @@ -42,11 +44,11 @@ As the overall consequence of this design, all of accounts is stored equally as Collecting rent on an as-needed basis \(i.e. whenever accounts were loaded/accessed\) was considered. The issues with such an approach are: -* accounts loaded as "credit only" for a transaction could very reasonably be expected to have rent due, +- accounts loaded as "credit only" for a transaction could very reasonably be expected to have rent due, but would not be writable during any such transaction -* a mechanism to "beat the bushes" \(i.e. go find accounts that need to pay rent\) is desirable, +- a mechanism to "beat the bushes" \(i.e. go find accounts that need to pay rent\) is desirable, lest accounts that are loaded infrequently get a free ride @@ -54,6 +56,6 @@ Collecting rent on an as-needed basis \(i.e. whenever accounts were loaded/acces Collecting rent via a system instruction was considered, as it would naturally have distributed rent to active and stake-weighted nodes and could have been done incrementally. 
However: -* it would have adversely affected network throughput -* it would require special-casing by the runtime, as accounts with non-SystemProgram owners may be debited by this instruction -* someone would have to issue the transactions +- it would have adversely affected network throughput +- it would require special-casing by the runtime, as accounts with non-SystemProgram owners may be debited by this instruction +- someone would have to issue the transactions diff --git a/docs/src/implemented-proposals/repair-service.md b/docs/src/implemented-proposals/repair-service.md index 436a5eea8c..2de8105fb8 100644 --- a/docs/src/implemented-proposals/repair-service.md +++ b/docs/src/implemented-proposals/repair-service.md @@ -1,4 +1,6 @@ -# Repair Service +--- +title: Repair Service +--- ## Repair Service @@ -19,25 +21,27 @@ repair these slots. If these slots happen to be part of the main chain, this will halt replay progress on this node. ## Repair-related primitives + Epoch Slots: - Each validator advertises separately on gossip the various parts of an - `Epoch Slots`: - * The `stash`: An epoch-long compressed set of all completed slots. - * The `cache`: The Run-length Encoding (RLE) of the latest `N` completed - slots starting from some some slot `M`, where `N` is the number of slots - that will fit in an MTU-sized packet. +Each validator advertises separately on gossip the various parts of an +`Epoch Slots`: - `Epoch Slots` in gossip are updated every time a validator receives a - complete slot within the epoch. Completed slots are detected by blockstore - and sent over a channel to RepairService. It is important to note that we - know that by the time a slot `X` is complete, the epoch schedule must exist - for the epoch that contains slot `X` because WindowService will reject - shreds for unconfirmed epochs. +- The `stash`: An epoch-long compressed set of all completed slots. +- The `cache`: The Run-length Encoding (RLE) of the latest `N` completed + slots starting from some some slot `M`, where `N` is the number of slots + that will fit in an MTU-sized packet. + +`Epoch Slots` in gossip are updated every time a validator receives a +complete slot within the epoch. Completed slots are detected by blockstore +and sent over a channel to RepairService. It is important to note that we +know that by the time a slot `X` is complete, the epoch schedule must exist +for the epoch that contains slot `X` because WindowService will reject +shreds for unconfirmed epochs. + +Every `N/2` completed slots, the oldest `N/2` slots are moved from the +`cache` into the `stash`. The base value `M` for the RLE should also +be updated. - Every `N/2` completed slots, the oldest `N/2` slots are moved from the - `cache` into the `stash`. The base value `M` for the RLE should also - be updated. - ## Repair Request Protocols The repair protocol makes best attempts to progress the forking structure of @@ -46,28 +50,29 @@ Blockstore. The different protocol strategies to address the above challenges: 1. Shred Repair \(Addresses Challenge \#1\): This is the most basic repair -protocol, with the purpose of detecting and filling "holes" in the ledger. -Blockstore tracks the latest root slot. RepairService will then periodically -iterate every fork in blockstore starting from the root slot, sending repair -requests to validators for any missing shreds. It will send at most some `N` -repair reqeusts per iteration. Shred repair should prioritize repairing -forks based on the leader's fork weight. 
Validators should only send repair -requests to validators who have marked that slot as completed in their -EpochSlots. Validators should prioritize repairing shreds in each slot -that they are responsible for retransmitting through turbine. Validators can -compute which shreds they are responsible for retransmitting because the -seed for turbine is based on leader id, slot, and shred index. + protocol, with the purpose of detecting and filling "holes" in the ledger. + Blockstore tracks the latest root slot. RepairService will then periodically + iterate every fork in blockstore starting from the root slot, sending repair + requests to validators for any missing shreds. It will send at most some `N` + repair reqeusts per iteration. Shred repair should prioritize repairing + forks based on the leader's fork weight. Validators should only send repair + requests to validators who have marked that slot as completed in their + EpochSlots. Validators should prioritize repairing shreds in each slot + that they are responsible for retransmitting through turbine. Validators can + compute which shreds they are responsible for retransmitting because the + seed for turbine is based on leader id, slot, and shred index. Note: Validators will only accept shreds within the current verifiable epoch \(epoch the validator has a leader schedule for\). 2. Preemptive Slot Repair \(Addresses Challenge \#2\): The goal of this -protocol is to discover the chaining relationship of "orphan" slots that do not -currently chain to any known fork. Shred repair should prioritize repairing -orphan slots based on the leader's fork weight. - * Blockstore will track the set of "orphan" slots in a separate column family. - * RepairService will periodically make `Orphan` requests for each of - the orphans in blockstore. + protocol is to discover the chaining relationship of "orphan" slots that do not + currently chain to any known fork. Shred repair should prioritize repairing + orphan slots based on the leader's fork weight. + + - Blockstore will track the set of "orphan" slots in a separate column family. + - RepairService will periodically make `Orphan` requests for each of + the orphans in blockstore. `Orphan(orphan)` request - `orphan` is the orphan slot that the requestor wants to know the parents of `Orphan(orphan)` response - @@ -77,9 +82,9 @@ orphan slots based on the leader's fork weight. On receiving the responses `p`, where `p` is some shred in a parent slot, validators will: - * Insert an empty `SlotMeta` in blockstore for `p.slot` if it doesn't - already exist. - * If `p.slot` does exist, update the parent of `p` based on `parents` + - Insert an empty `SlotMeta` in blockstore for `p.slot` if it doesn't + already exist. + - If `p.slot` does exist, update the parent of `p` based on `parents` Note: that once these empty slots are added to blockstore, the `Shred Repair` protocol should attempt to fill those slots. @@ -95,10 +100,9 @@ randomly select a validator in a stake-weighted fashion. ## Repair Response Protocol When a validator receives a request for a shred `S`, they respond with the -shred if they have it. +shred if they have it. When a validator receives a shred through a repair response, they check `EpochSlots` to see if <= `1/3` of the network has marked this slot as completed. If so, they resubmit this shred through its associated turbine path, but only if this validator has not retransmitted this shred before. 
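To make the `cache` described earlier in this file concrete, here is a toy run-length encoding of completed-slot flags starting at a base slot `M`. It only illustrates the idea of keeping the cache small enough for one MTU-sized packet; the real `EpochSlots` gossip structures are not shown in this diff.

```rust
// Toy RLE of "completed" flags for slots M, M+1, ...; each run is
// (run_length, completed?) so a long window compresses to a few entries.
fn rle_completed(base_slot: u64, completed: &[bool]) -> (u64, Vec<(u32, bool)>) {
    let mut runs: Vec<(u32, bool)> = Vec::new();
    for &flag in completed {
        match runs.last_mut() {
            Some((len, value)) if *value == flag => *len += 1,
            _ => runs.push((1, flag)),
        }
    }
    (base_slot, runs)
}

fn main() {
    // Slots 1000..1008: five completed, two missing, one completed.
    let (base, runs) =
        rle_completed(1000, &[true, true, true, true, true, false, false, true]);
    println!("base {}: {:?}", base, runs); // base 1000: [(5, true), (2, false), (1, true)]
}
```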
- diff --git a/docs/src/implemented-proposals/snapshot-verification.md b/docs/src/implemented-proposals/snapshot-verification.md index d2b41ee83a..88fdc356d9 100644 --- a/docs/src/implemented-proposals/snapshot-verification.md +++ b/docs/src/implemented-proposals/snapshot-verification.md @@ -1,4 +1,6 @@ -# Snapshot Verification +--- +title: Snapshot Verification +--- ## Problem @@ -18,11 +20,11 @@ To verify the snapshot, we do the following: On account store of non-zero lamport accounts, we hash the following data: -* Account owner -* Account data -* Account pubkey -* Account lamports balance -* Fork the account is stored on +- Account owner +- Account data +- Account pubkey +- Account lamports balance +- Fork the account is stored on Use this resulting hash value as input to an expansion function which expands the hash value into an image value. The function will create a 440 byte block of data where the first 32 bytes are the hash value, and the next 440 - 32 bytes are @@ -42,7 +44,7 @@ a validator bank to read that an account is not present when it really should be An attack on the xor state could be made to influence its value: -Thus the 440 byte image size comes from this paper, avoiding xor collision with 0 \(or thus any other given bit pattern\): \[[https://link.springer.com/content/pdf/10.1007%2F3-540-45708-9\_19.pdf](https://link.springer.com/content/pdf/10.1007%2F3-540-45708-9_19.pdf)\] +Thus the 440 byte image size comes from this paper, avoiding xor collision with 0 \(or thus any other given bit pattern\): \[[https://link.springer.com/content/pdf/10.1007%2F3-540-45708-9_19.pdf](https://link.springer.com/content/pdf/10.1007%2F3-540-45708-9_19.pdf)\] The math provides 128 bit security in this case: @@ -52,4 +54,3 @@ k=2^40 accounts n=440 2^(40) * 2^(448 * 8 / 41) ~= O(2^(128)) ``` - diff --git a/docs/src/implemented-proposals/staking-rewards.md b/docs/src/implemented-proposals/staking-rewards.md index 82aa7c3d9f..7aae3d21fa 100644 --- a/docs/src/implemented-proposals/staking-rewards.md +++ b/docs/src/implemented-proposals/staking-rewards.md @@ -1,16 +1,18 @@ -# Staking Rewards +--- +title: Staking Rewards +--- A Proof of Stake \(PoS\), \(i.e. using in-protocol asset, SOL, to provide secure consensus\) design is outlined here. Solana implements a proof of stake reward/security scheme for validator nodes in the cluster. The purpose is threefold: -* Align validator incentives with that of the greater cluster through +- Align validator incentives with that of the greater cluster through skin-in-the-game deposits at risk -* Avoid 'nothing at stake' fork voting issues by implementing slashing rules +- Avoid 'nothing at stake' fork voting issues by implementing slashing rules aimed at promoting fork convergence -* Provide an avenue for validator rewards provided as a function of validator +- Provide an avenue for validator rewards provided as a function of validator participation in the cluster. @@ -22,13 +24,13 @@ Solana's ledger validation design is based on a rotating, stake-weighted selecte To become a Solana validator, one must deposit/lock-up some amount of SOL in a contract. This SOL will not be accessible for a specific time period. The precise duration of the staking lockup period has not been determined. 
However we can consider three phases of this time for which specific parameters will be necessary: -* _Warm-up period_: which SOL is deposited and inaccessible to the node, +- _Warm-up period_: which SOL is deposited and inaccessible to the node, however PoH transaction validation has not begun. Most likely on the order of days to weeks -* _Validation period_: a minimum duration for which the deposited SOL will be +- _Validation period_: a minimum duration for which the deposited SOL will be inaccessible, at risk of slashing \(see slashing rules below\) and earning @@ -36,7 +38,7 @@ To become a Solana validator, one must deposit/lock-up some amount of SOL in a c year. -* _Cool-down period_: a duration of time following the submission of a +- _Cool-down period_: a duration of time following the submission of a 'withdrawal' transaction. During this period validation responsibilities have @@ -53,4 +55,3 @@ Solana's trustless sense of time and ordering provided by its PoH data structure As discussed in the [Economic Design](../implemented-proposals/ed_overview/README.md) section, annual validator interest rates are to be specified as a function of total percentage of circulating supply that has been staked. The cluster rewards validators who are online and actively participating in the validation process throughout the entirety of their _validation period_. For validators that go offline/fail to validate transactions during this period, their annual reward is effectively reduced. Similarly, we may consider an algorithmic reduction in a validator's active amount staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered ‘active’ \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a supermajority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the ‘active’ amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set. - diff --git a/docs/src/implemented-proposals/testing-programs.md b/docs/src/implemented-proposals/testing-programs.md index 9f10008712..03651293ab 100644 --- a/docs/src/implemented-proposals/testing-programs.md +++ b/docs/src/implemented-proposals/testing-programs.md @@ -1,18 +1,20 @@ -# Testing Programs +--- +title: Testing Programs +--- Applications send transactions to a Solana cluster and query validators to confirm the transactions were processed and to check each transaction's result. 
When the cluster doesn't behave as anticipated, it could be for a number of reasons: -* The program is buggy -* The BPF loader rejected an unsafe program instruction -* The transaction was too big -* The transaction was invalid -* The Runtime tried to execute the transaction when another one was accessing +- The program is buggy +- The BPF loader rejected an unsafe program instruction +- The transaction was too big +- The transaction was invalid +- The Runtime tried to execute the transaction when another one was accessing the same account -* The network dropped the transaction -* The cluster rolled back the ledger -* A validator responded to queries maliciously +- The network dropped the transaction +- The cluster rolled back the ledger +- A validator responded to queries maliciously ## The AsyncClient and SyncClient Traits @@ -49,4 +51,3 @@ Below the TPU level is the Bank. The Bank doesn't do signature verification or g ## Unit-testing with the Runtime Below the Bank is the Runtime. The Runtime is the ideal test environment for unit-testing. By statically linking the Runtime into a native program implementation, the developer gains the shortest possible edit-compile-run loop. Without any dynamic linking, stack traces include debug symbols and program errors are straightforward to troubleshoot. - diff --git a/docs/src/implemented-proposals/tower-bft.md b/docs/src/implemented-proposals/tower-bft.md index 5b10e0e39e..444b2d5d52 100644 --- a/docs/src/implemented-proposals/tower-bft.md +++ b/docs/src/implemented-proposals/tower-bft.md @@ -1,12 +1,14 @@ -# Tower BFT +--- +title: Tower BFT +--- This design describes Solana's _Tower BFT_ algorithm. It addresses the following problems: -* Some forks may not end up accepted by the supermajority of the cluster, and voters need to recover from voting on such forks. -* Many forks may be votable by different voters, and each voter may see a different set of votable forks. The selected forks should eventually converge for the cluster. -* Reward based votes have an associated risk. Voters should have the ability to configure how much risk they take on. -* The [cost of rollback](tower-bft.md#cost-of-rollback) needs to be computable. It is important to clients that rely on some measurable form of Consistency. The costs to break consistency need to be computable, and increase super-linearly for older votes. -* ASIC speeds are different between nodes, and attackers could employ Proof of History ASICS that are much faster than the rest of the cluster. Consensus needs to be resistant to attacks that exploit the variability in Proof of History ASIC speed. +- Some forks may not end up accepted by the supermajority of the cluster, and voters need to recover from voting on such forks. +- Many forks may be votable by different voters, and each voter may see a different set of votable forks. The selected forks should eventually converge for the cluster. +- Reward based votes have an associated risk. Voters should have the ability to configure how much risk they take on. +- The [cost of rollback](tower-bft.md#cost-of-rollback) needs to be computable. It is important to clients that rely on some measurable form of Consistency. The costs to break consistency need to be computable, and increase super-linearly for older votes. +- ASIC speeds are different between nodes, and attackers could employ Proof of History ASICS that are much faster than the rest of the cluster. 
Consensus needs to be resistant to attacks that exploit the variability in Proof of History ASIC speed. For brevity this design assumes that a single voter with a stake is deployed as an individual validator in the cluster. @@ -35,35 +37,35 @@ Before a vote is pushed to the stack, all the votes leading up to vote with a lo For example, a vote stack with the following state: | vote | vote time | lockout | lock expiration time | -| ---: | ---: | ---: | ---: | -| 4 | 4 | 2 | 6 | -| 3 | 3 | 4 | 7 | -| 2 | 2 | 8 | 10 | -| 1 | 1 | 16 | 17 | +| ---: | --------: | ------: | -------------------: | +| 4 | 4 | 2 | 6 | +| 3 | 3 | 4 | 7 | +| 2 | 2 | 8 | 10 | +| 1 | 1 | 16 | 17 | _Vote 5_ is at time 9, and the resulting state is | vote | vote time | lockout | lock expiration time | -| ---: | ---: | ---: | ---: | -| 5 | 9 | 2 | 11 | -| 2 | 2 | 8 | 10 | -| 1 | 1 | 16 | 17 | +| ---: | --------: | ------: | -------------------: | +| 5 | 9 | 2 | 11 | +| 2 | 2 | 8 | 10 | +| 1 | 1 | 16 | 17 | _Vote 6_ is at time 10 | vote | vote time | lockout | lock expiration time | -| ---: | ---: | ---: | ---: | -| 6 | 10 | 2 | 12 | -| 5 | 9 | 4 | 13 | -| 2 | 2 | 8 | 10 | -| 1 | 1 | 16 | 17 | +| ---: | --------: | ------: | -------------------: | +| 6 | 10 | 2 | 12 | +| 5 | 9 | 4 | 13 | +| 2 | 2 | 8 | 10 | +| 1 | 1 | 16 | 17 | At time 10 the new votes caught up to the previous votes. But _vote 2_ expires at 10, so the when _vote 7_ at time 11 is applied the votes including and above _vote 2_ will be popped. | vote | vote time | lockout | lock expiration time | -| ---: | ---: | ---: | ---: | -| 7 | 11 | 2 | 13 | -| 1 | 1 | 16 | 17 | +| ---: | --------: | ------: | -------------------: | +| 7 | 11 | 2 | 13 | +| 1 | 1 | 16 | 17 | The lockout for vote 1 will not increase from 16 until the stack contains 5 votes. @@ -85,18 +87,18 @@ Each validator can independently set a threshold of cluster commitment to a fork The following parameters need to be tuned: -* Number of votes in the stack before dequeue occurs \(32\). -* Rate of growth for lockouts in the stack \(2x\). -* Starting default lockout \(2\). -* Threshold depth for minimum cluster commitment before committing to the fork \(8\). -* Minimum cluster commitment size at threshold depth \(50%+\). +- Number of votes in the stack before dequeue occurs \(32\). +- Rate of growth for lockouts in the stack \(2x\). +- Starting default lockout \(2\). +- Threshold depth for minimum cluster commitment before committing to the fork \(8\). +- Minimum cluster commitment size at threshold depth \(50%+\). ### Free Choice A "Free Choice" is an unenforcible validator action. There is no way for the protocol to encode and enforce these actions since each validator can modify the code and adjust the algorithm. A validator that maximizes self-reward over all possible futures should behave in such a way that the system is stable, and the local greedy choice should result in a greedy choice over all possible futures. A set of validator that are engaging in choices to disrupt the protocol should be bound by their stake weight to the denial of service. 
Two options exits for validator: -* a validator can outrun previous validator in virtual generation and submit a concurrent fork -* a validator can withhold a vote to observe multiple forks before voting +- a validator can outrun previous validator in virtual generation and submit a concurrent fork +- a validator can withhold a vote to observe multiple forks before voting In both cases, the validator in the cluster have several forks to pick from concurrently, even though each fork represents a different height. In both cases it is impossible for the protocol to detect if the validator behavior is intentional or not. @@ -129,8 +131,8 @@ This attack is then limited to censoring the previous leaders fees, and individu An attacker generates a concurrent fork from an older block to try to rollback the cluster. In this attack the concurrent fork is competing with forks that have already been voted on. This attack is limited by the exponential growth of the lockouts. -* 1 vote has a lockout of 2 slots. Concurrent fork must be at least 2 slots ahead, and be produced in 1 slot. Therefore requires an ASIC 2x faster. -* 2 votes have a lockout of 4 slots. Concurrent fork must be at least 4 slots ahead and produced in 2 slots. Therefore requires an ASIC 2x faster. -* 3 votes have a lockout of 8 slots. Concurrent fork must be at least 8 slots ahead and produced in 3 slots. Therefore requires an ASIC 2.6x faster. -* 10 votes have a lockout of 1024 slots. 1024/10, or 102.4x faster ASIC. -* 20 votes have a lockout of 2^20 slots. 2^20/20, or 52,428.8x faster ASIC. +- 1 vote has a lockout of 2 slots. Concurrent fork must be at least 2 slots ahead, and be produced in 1 slot. Therefore requires an ASIC 2x faster. +- 2 votes have a lockout of 4 slots. Concurrent fork must be at least 4 slots ahead and produced in 2 slots. Therefore requires an ASIC 2x faster. +- 3 votes have a lockout of 8 slots. Concurrent fork must be at least 8 slots ahead and produced in 3 slots. Therefore requires an ASIC 2.6x faster. +- 10 votes have a lockout of 1024 slots. 1024/10, or 102.4x faster ASIC. +- 20 votes have a lockout of 2^20 slots. 2^20/20, or 52,428.8x faster ASIC. diff --git a/docs/src/implemented-proposals/transaction-fees.md b/docs/src/implemented-proposals/transaction-fees.md index 2071a0f8e5..fb2165b7f0 100644 --- a/docs/src/implemented-proposals/transaction-fees.md +++ b/docs/src/implemented-proposals/transaction-fees.md @@ -1,4 +1,6 @@ -# Deterministic Transaction Fees +--- +title: Deterministic Transaction Fees +--- Transactions currently include a fee field that indicates the maximum fee field a slot leader is permitted to charge to process a transaction. The cluster, on the other hand, agrees on a minimum fee. If the network is congested, the slot leader may prioritize the transactions offering higher fees. That means the client won't know how much was collected until the transaction is confirmed by the cluster and the remaining balance is checked. It smells of exactly what we dislike about Ethereum's "gas", non-determinism. @@ -14,14 +16,14 @@ Before sending a transaction to the cluster, a client may submit the transaction ## Fee Parameters -In the first implementation of this design, the only fee parameter is `lamports_per_signature`. The more signatures the cluster needs to verify, the higher the fee. The exact number of lamports is determined by the ratio of SPS to the SPS target. 
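The lockout arithmetic above, and the vote-stack tables earlier in this file, follow mechanically from the written rules: an initial lockout of 2, doubling as newer votes stack on top, and popping of expired votes. The sketch below simulates those rules exactly as stated here; it is illustrative and not the validator's actual Tower implementation. Running it reproduces the final table, leaving only the votes at times 1 and 11 on the stack.

```rust
struct Vote {
    slot: u64,
    confirmation_count: u32, // lockout = 2^confirmation_count
}

impl Vote {
    fn lockout(&self) -> u64 {
        2u64.pow(self.confirmation_count)
    }
    fn expiration(&self) -> u64 {
        self.slot + self.lockout()
    }
}

fn apply_vote(stack: &mut Vec<Vote>, slot: u64) {
    // Pop the lowest expired vote and everything stacked above it.
    if let Some(pos) = stack.iter().position(|v| v.expiration() < slot) {
        stack.truncate(pos);
    }
    stack.push(Vote { slot, confirmation_count: 1 });
    // A deeper vote's lockout doubles once enough newer votes sit above it.
    let depth = stack.len();
    for (i, v) in stack.iter_mut().enumerate() {
        if depth > i + v.confirmation_count as usize {
            v.confirmation_count += 1;
        }
    }
}

fn main() {
    let mut stack = Vec::new();
    for slot in [1, 2, 3, 4, 9, 10, 11] {
        apply_vote(&mut stack, slot);
    }
    for v in &stack {
        // Prints: slot 1 (lockout 16, expires 17) and slot 11 (lockout 2, expires 13)
        println!("slot {}: lockout {}, expires {}", v.slot, v.lockout(), v.expiration());
    }
}
```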
At the end of each slot, the cluster lowers `lamports_per_signature` when SPS is below the target and raises it when above the target. The minimum value for `lamports_per_signature` is 50% of the target `lamports_per_signature` and the maximum value is 10x the target \`lamports\_per\_signature' +In the first implementation of this design, the only fee parameter is `lamports_per_signature`. The more signatures the cluster needs to verify, the higher the fee. The exact number of lamports is determined by the ratio of SPS to the SPS target. At the end of each slot, the cluster lowers `lamports_per_signature` when SPS is below the target and raises it when above the target. The minimum value for `lamports_per_signature` is 50% of the target `lamports_per_signature` and the maximum value is 10x the target \`lamports_per_signature' Future parameters might include: -* `lamports_per_pubkey` - cost to load an account -* `lamports_per_slot_distance` - higher cost to load very old accounts -* `lamports_per_byte` - cost per size of account loaded -* `lamports_per_bpf_instruction` - cost to run a program +- `lamports_per_pubkey` - cost to load an account +- `lamports_per_slot_distance` - higher cost to load very old accounts +- `lamports_per_byte` - cost per size of account loaded +- `lamports_per_bpf_instruction` - cost to run a program ## Attacks diff --git a/docs/src/implemented-proposals/validator-timestamp-oracle.md b/docs/src/implemented-proposals/validator-timestamp-oracle.md index c48a756a8d..a595e710a5 100644 --- a/docs/src/implemented-proposals/validator-timestamp-oracle.md +++ b/docs/src/implemented-proposals/validator-timestamp-oracle.md @@ -1,4 +1,6 @@ -# Validator Timestamp Oracle +--- +title: Validator Timestamp Oracle +--- Third-party users of Solana sometimes need to know the real-world time a block was produced, generally to meet compliance requirements for external auditors or @@ -10,17 +12,18 @@ The general outline of the proposed implementation is as follows: - At regular intervals, each validator records its observed time for a known slot on-chain (via a Timestamp added to a slot Vote) - A client can request a block time for a rooted block using the `getBlockTime` -RPC method. When a client requests a timestamp for block N: + RPC method. When a client requests a timestamp for block N: 1. A validator determines a "cluster" timestamp for a recent timestamped slot - before block N by observing all the timestamped Vote instructions recorded on - the ledger that reference that slot, and determining the stake-weighted mean - timestamp. + before block N by observing all the timestamped Vote instructions recorded on + the ledger that reference that slot, and determining the stake-weighted mean + timestamp. 2. This recent mean timestamp is then used to calculate the timestamp of - block N using the cluster's established slot duration + block N using the cluster's established slot duration Requirements: + - Any validator replaying the ledger in the future must come up with the same time for every block since genesis - Estimated block times should not drift more than an hour or so before resolving @@ -43,8 +46,7 @@ records its observed time by including a timestamp in its Vote instruction submission. The corresponding slot for the timestamp is the newest Slot in the Vote vector (`Vote::slots.iter().max()`). It is signed by the validator's identity keypair as a usual Vote. 
In order to enable this reporting, the Vote -struct needs to be extended to include a timestamp field, `timestamp: -Option`, which will be set to `None` in most Votes. +struct needs to be extended to include a timestamp field, `timestamp: Option`, which will be set to `None` in most Votes. This proposal suggests that Vote instructions with `Some(timestamp)` be issued every 30min, which should be short enough to prevent block times drifting very @@ -67,7 +69,7 @@ A validator's vote account will hold its most recent slot-timestamp in VoteState ### Vote Program The on-chain Vote program needs to be extended to process a timestamp sent with -a Vote instruction from validators. In addition to its current process\_vote +a Vote instruction from validators. In addition to its current process_vote functionality (including loading the correct Vote account and verifying that the transaction signer is the expected validator), this process needs to compare the timestamp and corresponding slot to the currently stored values to verify that @@ -86,7 +88,7 @@ let timestamp_slot = floor(current_slot / timestamp_interval); Then the validator needs to gather all Vote WithTimestamp transactions from the ledger that reference that slot, using `Blockstore::get_slot_entries()`. As these transactions could have taken some time to reach and be processed by the leader, -the validator needs to scan several completed blocks after the timestamp\_slot to +the validator needs to scan several completed blocks after the timestamp_slot to get a reasonable set of Timestamps. The exact number of slots will need to be tuned: More slots will enable greater cluster participation and more timestamp datapoints; fewer slots will speed how long timestamp filtering takes. @@ -109,5 +111,5 @@ let block_n_timestamp = mean_timestamp + (block_n_slot_offset * slot_duration); ``` where `block_n_slot_offset` is the difference between the slot of block N and -the timestamp\_slot, and `slot_duration` is derived from the cluster's +the timestamp_slot, and `slot_duration` is derived from the cluster's `slots_per_year` stored in each Bank diff --git a/docs/src/integrations/exchange.md b/docs/src/integrations/exchange.md index ef085e7f67..9a41339cdd 100644 --- a/docs/src/integrations/exchange.md +++ b/docs/src/integrations/exchange.md @@ -1,4 +1,6 @@ -# Add Solana to Your Exchange +--- +title: Add Solana to Your Exchange +--- This guide describes how to add Solana's native token SOL to your cryptocurrency exchange. @@ -13,6 +15,7 @@ To run an api node: 1. [Install the Solana command-line tool suite](../cli/install-solana-cli-tools.md) 2. Boot the node with at least the following parameters: + ```bash solana-validator \ --ledger \ @@ -27,18 +30,19 @@ solana-validator \ --no-untrusted-rpc ``` - Customize `--ledger` to your desired ledger storage location, and `--rpc-port` to the port you want to expose. +Customize `--ledger` to your desired ledger storage location, and `--rpc-port` to the port you want to expose. - The `--entrypoint`, `--expected-genesis-hash`, and `--expected-shred-version` parameters are all specific to the cluster you are joining. The shred version will change on any hard forks in the cluster, so including `--expected-shred-version` ensures you are receiving current data from the cluster you expect. 
- [Current parameters for Mainnet Beta](../clusters.md#example-solana-validator-command-line-2) +The `--entrypoint`, `--expected-genesis-hash`, and `--expected-shred-version` parameters are all specific to the cluster you are joining. The shred version will change on any hard forks in the cluster, so including `--expected-shred-version` ensures you are receiving current data from the cluster you expect. +[Current parameters for Mainnet Beta](../clusters.md#example-solana-validator-command-line-2) - The `--limit-ledger-size` parameter allows you to specify how many ledger [shreds](../terminology.md#shred) your node retains on disk. If you do not include this parameter, the ledger will keep the entire ledger until it runs out of disk space. A larger value like `--limit-ledger-size 250000000000` is good for a couple days +The `--limit-ledger-size` parameter allows you to specify how many ledger [shreds](../terminology.md#shred) your node retains on disk. If you do not include this parameter, the ledger will keep the entire ledger until it runs out of disk space. A larger value like `--limit-ledger-size 250000000000` is good for a couple days - Specifying one or more `--trusted-validator` parameters can protect you from booting from a malicious snapshot. [More on the value of booting with trusted validators](../running-validator/validator-start.md#trusted-validators) +Specifying one or more `--trusted-validator` parameters can protect you from booting from a malicious snapshot. [More on the value of booting with trusted validators](../running-validator/validator-start.md#trusted-validators) - Optional parameters to consider: - - `--private-rpc` prevents your RPC port from being published for use by other nodes - - `--rpc-bind-address` allows you to specify a different IP address to bind the RPC port +Optional parameters to consider: + +- `--private-rpc` prevents your RPC port from being published for use by other nodes +- `--rpc-bind-address` allows you to specify a different IP address to bind the RPC port ### Automatic Restarts @@ -102,17 +106,18 @@ The easiest way to track all the deposit accounts for your exchange is to poll for each confirmed block and inspect for addresses of interest, using the JSON-RPC service of your Solana api node. -* To identify which blocks are available, send a [`getConfirmedBlocks` request](../apps/jsonrpc-api.md#getconfirmedblocks), -passing the last block you have already processed as the start-slot parameter: +- To identify which blocks are available, send a [`getConfirmedBlocks` request](../apps/jsonrpc-api.md#getconfirmedblocks), + passing the last block you have already processed as the start-slot parameter: ```bash curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5]}' localhost:8899 {"jsonrpc":"2.0","result":[5,6,8,9,11],"id":1} ``` + Not every slot produces a block, so there may be gaps in the sequence of integers. -* For each block, request its contents with a [`getConfirmedBlock` request](../apps/jsonrpc-api.md#getconfirmedblock): +- For each block, request its contents with a [`getConfirmedBlock` request](../apps/jsonrpc-api.md#getconfirmedblock): ```bash curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[5, "json"]}' localhost:8899 @@ -195,8 +200,8 @@ can request the block from RPC in binary format, and parse it using either our You can also query the transaction history of a specific address. 
-* Send a [`getConfirmedSignaturesForAddress`](../apps/jsonrpc-api.md#getconfirmedsignaturesforaddress) -request to the api node, specifying a range of recent slots: +- Send a [`getConfirmedSignaturesForAddress`](../apps/jsonrpc-api.md#getconfirmedsignaturesforaddress) + request to the api node, specifying a range of recent slots: ```bash curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", 0, 10]}' localhost:8899 @@ -212,8 +217,8 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m } ``` -* For each signature returned, get the transaction details by sending a -[`getConfirmedTransaction`](../apps/jsonrpc-api.md#getconfirmedtransaction) request: +- For each signature returned, get the transaction details by sending a + [`getConfirmedTransaction`](../apps/jsonrpc-api.md#getconfirmedtransaction) request: ```bash curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6", "json"]}' localhost:8899 @@ -312,6 +317,7 @@ more on [blockhash expiration](#blockhash-expiration) below. First, get a recent blockhash using the [`getFees` endpoint](../apps/jsonrpc-api.md#getfees) or the CLI command: + ```bash solana fees --url http://localhost:8899 ``` diff --git a/docs/src/introduction.md b/docs/src/introduction.md index ff10bf779b..b4b6d57ff3 100644 --- a/docs/src/introduction.md +++ b/docs/src/introduction.md @@ -1,4 +1,6 @@ -# Introduction +--- +title: Introduction +--- ## What is Solana? diff --git a/docs/src/offline-signing/README.md b/docs/src/offline-signing/README.md index 4e697ad981..6f77ba2843 100644 --- a/docs/src/offline-signing/README.md +++ b/docs/src/offline-signing/README.md @@ -1,12 +1,15 @@ -# Offline Transaction Signing +--- +title: Offline Transaction Signing +--- Some security models require keeping signing keys, and thus the signing process, separated from transaction creation and network broadcast. Examples include: - * Collecting signatures from geographically disparate signers in a -[multi-signature scheme](../cli/usage.md#multiple-witnesses) - * Signing transactions using an [airgapped](https://en.wikipedia.org/wiki/Air_gap_(networking)) -signing device + +- Collecting signatures from geographically disparate signers in a + [multi-signature scheme](../cli/usage.md#multiple-witnesses) +- Signing transactions using an [airgapped]() + signing device This document describes using Solana's CLI to separately sign and submit a transaction. @@ -14,27 +17,29 @@ transaction. 
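Conceptually, offline signing amounts to building and signing a transaction against a caller-supplied blockhash without any network connection, then sharing the resulting pubkey/signature pairs. A rough sketch of that flow in Rust with the `solana-sdk` crate (not the CLI's implementation; the keypair, recipient, and blockhash below are placeholders):

```rust
// Sketch only: sign a transfer completely offline against an agreed blockhash.
use solana_sdk::{
    hash::Hash,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

fn main() {
    let payer = Keypair::new();           // the offline signer (placeholder)
    let recipient = Pubkey::new_unique(); // placeholder recipient
    let blockhash = Hash::default();      // in practice, the value passed via `--blockhash`

    let ix = system_instruction::transfer(&payer.pubkey(), &recipient, 1_000_000);
    // Nothing below requires network access
    let tx = Transaction::new_signed_with_payer(&[ix], Some(&payer.pubkey()), &[&payer], blockhash);

    // Equivalent in spirit to the pubkey/signature pairs that `--sign-only` prints
    for (pubkey, signature) in tx.message.account_keys.iter().zip(tx.signatures.iter()) {
        println!("{}={}", pubkey, signature);
    }
}
```

Submitting later only needs that same blockhash plus the printed pairs, which is what the CLI arguments described below carry.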
## Commands Supporting Offline Signing At present, the following commands support offline signing: -* [`create-stake-account`](../cli/usage.md#solana-create-stake-account) -* [`deactivate-stake`](../cli/usage.md#solana-deactivate-stake) -* [`delegate-stake`](../cli/usage.md#solana-delegate-stake) -* [`split-stake`](../cli/usage.md#solana-split-stake) -* [`stake-authorize`](../cli/usage.md#solana-stake-authorize) -* [`stake-set-lockup`](../cli/usage.md#solana-stake-set-lockup) -* [`transfer`](../cli/usage.md#solana-transfer) -* [`withdraw-stake`](../cli/usage.md#solana-withdraw-stake) + +- [`create-stake-account`](../cli/usage.md#solana-create-stake-account) +- [`deactivate-stake`](../cli/usage.md#solana-deactivate-stake) +- [`delegate-stake`](../cli/usage.md#solana-delegate-stake) +- [`split-stake`](../cli/usage.md#solana-split-stake) +- [`stake-authorize`](../cli/usage.md#solana-stake-authorize) +- [`stake-set-lockup`](../cli/usage.md#solana-stake-set-lockup) +- [`transfer`](../cli/usage.md#solana-transfer) +- [`withdraw-stake`](../cli/usage.md#solana-withdraw-stake) ## Signing Transactions Offline To sign a transaction offline, pass the following arguments on the command line -1) `--sign-only`, prevents the client from submitting the signed transaction -to the network. Instead, the pubkey/signature pairs are printed to stdout. -2) `--blockhash BASE58_HASH`, allows the caller to specify the value used to -fill the transaction's `recent_blockhash` field. This serves a number of -purposes, namely: - * Eliminates the need to connect to the network and query a recent blockhash -via RPC - * Enables the signers to coordinate the blockhash in a multiple-signature -scheme + +1. `--sign-only`, prevents the client from submitting the signed transaction + to the network. Instead, the pubkey/signature pairs are printed to stdout. +2. `--blockhash BASE58_HASH`, allows the caller to specify the value used to + fill the transaction's `recent_blockhash` field. This serves a number of + purposes, namely: + _ Eliminates the need to connect to the network and query a recent blockhash + via RPC + _ Enables the signers to coordinate the blockhash in a multiple-signature + scheme ### Example: Offline Signing a Payment @@ -60,10 +65,11 @@ Signers (Pubkey=Signature): To submit a transaction that has been signed offline to the network, pass the following arguments on the command line -1) `--blockhash BASE58_HASH`, must be the same blockhash as was used to sign -2) `--signer BASE58_PUBKEY=BASE58_SIGNATURE`, one for each offline signer. This -includes the pubkey/signature pairs directly in the transaction rather than -signing it with any local keypair(s) + +1. `--blockhash BASE58_HASH`, must be the same blockhash as was used to sign +2. `--signer BASE58_PUBKEY=BASE58_SIGNATURE`, one for each offline signer. This + includes the pubkey/signature pairs directly in the transaction rather than + signing it with any local keypair(s) ### Example: Submitting an Offline Signed Payment diff --git a/docs/src/offline-signing/durable-nonce.md b/docs/src/offline-signing/durable-nonce.md index a53ecbf5f5..b34a95bbe4 100644 --- a/docs/src/offline-signing/durable-nonce.md +++ b/docs/src/offline-signing/durable-nonce.md @@ -1,4 +1,6 @@ -# Durable Transaction Nonces +--- +title: Durable Transaction Nonces +--- Durable transaction nonces are a mechanism for getting around the typical short lifetime of a transaction's [`recent_blockhash`](../transaction.md#recent-blockhash). 
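Mechanically, a transaction that uses a durable nonce puts an instruction to advance the nonce first in its instruction list and fills the `recent_blockhash` field with the hash currently stored in the nonce account. A minimal Rust sketch with `solana-sdk` (the accounts and the stored nonce value are placeholders, and the fee payer doubles as the nonce authority):

```rust
// Sketch only: assemble a transfer that relies on a durable nonce instead of
// a recent blockhash. The first instruction must advance the nonce.
use solana_sdk::{
    hash::Hash,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

fn build_nonced_transfer(
    payer: &Keypair,        // also acts as the nonce authority in this sketch
    nonce_account: &Pubkey, // account holding the stored nonce value
    recipient: &Pubkey,
    lamports: u64,
    stored_nonce: Hash,     // the hash read from the nonce account's state
) -> Transaction {
    let instructions = vec![
        // Must come first so the runtime advances the nonce
        system_instruction::advance_nonce_account(nonce_account, &payer.pubkey()),
        system_instruction::transfer(&payer.pubkey(), recipient, lamports),
    ];
    Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &[payer],
        stored_nonce, // used in place of a recent blockhash
    )
}
```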
@@ -19,15 +21,16 @@ creation of more complex account ownership arrangements and derived account addresses not associated with a keypair. The `--nonce-authority ` argument is used to specify this account and is supported by the following commands -* `create-nonce-account` -* `new-nonce` -* `withdraw-from-nonce-account` -* `authorize-nonce-account` + +- `create-nonce-account` +- `new-nonce` +- `withdraw-from-nonce-account` +- `authorize-nonce-account` ### Nonce Account Creation The durable transaction nonce feature uses an account to store the next nonce -value. Durable nonce accounts must be [rent-exempt](../implemented-proposals/rent.md#two-tiered-rent-regime), +value. Durable nonce accounts must be [rent-exempt](../implemented-proposals/rent.md#two-tiered-rent-regime), so need to carry the minimum balance to achieve this. A nonce account is created by first generating a new keypair, then create the account on chain @@ -45,15 +48,9 @@ solana create-nonce-account nonce-keypair.json 1 2SymGjGV4ksPdpbaqWFiDoBz8okvtiik4KE9cnMQgRHrRLySSdZ6jrEcpPifW4xUpp4z66XM9d9wM48sA7peG2XL ``` -{% hint style="info" %} -To keep the keypair entirely offline, use the [Paper Wallet](../paper-wallet/README.md) -keypair generation [instructions](../paper-wallet/paper-wallet-usage.md#seed-phrase-generation.md) -instead -{% endhint %} +> To keep the keypair entirely offline, use the [Paper Wallet](../paper-wallet/README.md) keypair generation [instructions](../paper-wallet/paper-wallet-usage.md#seed-phrase-generation.md) instead -{% hint style="info" %} -[Full usage documentation](../cli/usage.md#solana-create-nonce-account) -{% endhint %} +> [Full usage documentation](../cli/usage.md#solana-create-nonce-account) ### Querying the Stored Nonce Value @@ -73,9 +70,7 @@ solana nonce nonce-keypair.json 8GRipryfxcsxN8mAGjy8zbFo9ezaUsh47TsPzmZbuytU ``` -{% hint style="info" %} -[Full usage documentation](../cli/usage.md#solana-get-nonce) -{% endhint %} +> [Full usage documentation](../cli/usage.md#solana-get-nonce) ### Advancing the Stored Nonce Value @@ -94,9 +89,7 @@ solana new-nonce nonce-keypair.json 44jYe1yPKrjuYDmoFTdgPjg8LFpYyh1PFKJqm5SC1PiSyAL8iw1bhadcAX1SL7KDmREEkmHpYvreKoNv6fZgfvUK ``` -{% hint style="info" %} -[Full usage documentation](../cli/usage.md#solana-new-nonce) -{% endhint %} +> [Full usage documentation](../cli/usage.md#solana-new-nonce) ### Display Nonce Account @@ -116,9 +109,7 @@ minimum balance required: 0.00136416 SOL nonce: DZar6t2EaCFQTbUP4DHKwZ1wT8gCPW2aRfkVWhydkBvS ``` -{% hint style="info" %} -[Full usage documentation](../cli/usage.md#solana-nonce-account) -{% endhint %} +> [Full usage documentation](../cli/usage.md#solana-nonce-account) ### Withdraw Funds from a Nonce Account @@ -136,13 +127,9 @@ solana withdraw-from-nonce-account nonce-keypair.json ~/.config/solana/id.json 0 3foNy1SBqwXSsfSfTdmYKDuhnVheRnKXpoPySiUDBVeDEs6iMVokgqm7AqfTjbk7QBE8mqomvMUMNQhtdMvFLide ``` -{% hint style="info" %} -Close a nonce account by withdrawing the full balance -{% endhint %} +> Close a nonce account by withdrawing the full balance -{% hint style="info" %} -[Full usage documentation](../cli/usage.md#solana-withdraw-from-nonce-account) -{% endhint %} +> [Full usage documentation](../cli/usage.md#solana-withdraw-from-nonce-account) ### Assign a New Authority to a Nonce Account @@ -160,21 +147,21 @@ solana authorize-nonce-account nonce-keypair.json nonce-authority.json 3F9cg4zN9wHxLGx4c3cUKmqpej4oa67QbALmChsJbfxTgTffRiL3iUehVhR9wQmWgPua66jPuAYeL1K2pYYjbNoT ``` -{% hint style="info" %} -[Full usage 
documentation](../cli/usage.md#solana-authorize-nonce-account) -{% endhint %} +> [Full usage documentation](../cli/usage.md#solana-authorize-nonce-account) ## Other Commands Supporting Durable Nonces To make use of durable nonces with other CLI subcommands, two arguments must be supported. -* `--nonce`, specifies the account storing the nonce value -* `--nonce-authority`, specifies an optional [nonce authority](#nonce-authority) + +- `--nonce`, specifies the account storing the nonce value +- `--nonce-authority`, specifies an optional [nonce authority](#nonce-authority) The following subcommands have received this treatment so far -* [`pay`](../cli/usage.md#solana-pay) -* [`delegate-stake`](../cli/usage.md#solana-delegate-stake) -* [`deactivate-stake`](../cli/usage.md#solana-deactivate-stake) + +- [`pay`](../cli/usage.md#solana-pay) +- [`delegate-stake`](../cli/usage.md#solana-delegate-stake) +- [`deactivate-stake`](../cli/usage.md#solana-deactivate-stake) ### Example Pay Using Durable Nonce @@ -205,10 +192,7 @@ $ solana airdrop -k alice.json 10 Now Alice needs a nonce account. Create one -{% hint style="info" %} -Here, no separate [nonce authority](#nonce-authority) is employed, so `alice.json` -has full authority over the nonce account -{% endhint %} +> Here, no separate [nonce authority](#nonce-authority) is employed, so `alice.json` has full authority over the nonce account ```bash $ solana create-nonce-account -k alice.json nonce.json 1 @@ -231,9 +215,7 @@ Error: Io(Custom { kind: Other, error: "Transaction \"33gQQaoPc9jWePMvDAeyJpcnSP Alice retries the transaction, this time specifying her nonce account and the blockhash stored there -{% hint style="info" %} -Remember, `alice.json` is the [nonce authority](#nonce-authority) in this example -{% endhint %} +> Remember, `alice.json` is the [nonce authority](#nonce-authority) in this example ```bash $ solana nonce-account nonce.json @@ -241,6 +223,7 @@ balance: 1 SOL minimum balance required: 0.00136416 SOL nonce: F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7 ``` + ```bash $ solana pay -k alice.json --blockhash F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7 --nonce nonce.json bob.json 1 HR1368UKHVZyenmH7yVz5sBAijV6XAPeWbEiXEGVYQorRMcoijeNAbzZqEZiH8cDB8tk65ckqeegFjK8dHwNFgQ @@ -248,13 +231,14 @@ HR1368UKHVZyenmH7yVz5sBAijV6XAPeWbEiXEGVYQorRMcoijeNAbzZqEZiH8cDB8tk65ckqeegFjK8 #### - Success! -The transaction succeeds! Bob receives 1 SOL from Alice and Alice's stored +The transaction succeeds! 
Bob receives 1 SOL from Alice and Alice's stored nonce advances to a new value ```bash $ solana balance -k bob.json 1 SOL ``` + ```bash $ solana nonce-account nonce.json balance: 1 SOL diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js new file mode 100644 index 0000000000..2438948728 --- /dev/null +++ b/docs/src/pages/index.js @@ -0,0 +1,133 @@ +import React from "react"; +import clsx from "clsx"; +import Layout from "@theme/Layout"; +import Link from "@docusaurus/Link"; +import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import styles from "./styles.module.css"; + +const features = [ + { + title: <>Run a Validator, + imageUrl: "docs/running-validator/README", + description: <>Learn how to start a validator on the Solana cluster., + }, + { + title: <>Launch an Application, + imageUrl: "docs/apps/README", + description: <>Build superfast applications with one API., + }, + { + title: <>Participate in Tour de SOL, + imageUrl: "docs/tour-de-sol/README", + description: ( + <> + Participate in our incentivised testnet and earn rewards by finding + bugs. + + ), + }, + { + title: <>Integrate the SOL token into your Exchange, + imageUrl: "docs/integrations/exchange", + description: ( + <> + Follow our extensive integration guide to ensure a seamless user + experience. + + ), + }, + { + title: <>Create or Configure a Solana Wallet, + imageUrl: "docs/wallet-guide/README", + description: ( + <> + Whether you need to create a wallet, check the balance of your funds, or + take a look at what's out there for housing SOL tokens, start here. + + ), + }, + { + title: <>Learn About Solana's Architecture, + imageUrl: "docs/cluster/README", + description: ( + <> + Familiarize yourself with the high level architecture of a Solana + cluster. + + ), + }, // + // { + // title: <>Understand Our Economic Design, + // imageUrl: "docs/implemented-proposals/ed_overview/README", + // description: ( + // <> + // Solana's Economic Design provides a scalable blueprint for long term + // economic development and prosperity. + // + // ), + // } +]; + +function Feature({ imageUrl, title, description }) { + const imgUrl = useBaseUrl(imageUrl); + return ( +
    <div className={clsx("col col--4")}>
+      {imgUrl && (
+        <Link to={imgUrl}>
+          <div className="card">
+            <div className="card__header">
+              <h3>{title}</h3>
+            </div>
+            <div className="card__body">
+              <p>{description}</p>
+            </div>
+          </div>
+        </Link>
+      )}
+    </div>
+  );
+}
+
+function Home() {
+  const context = useDocusaurusContext();
+  const { siteConfig = {} } = context;
+  return (
+    <Layout title={siteConfig.title} description={siteConfig.tagline}>
+      {/* <header className={clsx("hero hero--primary", styles.heroBanner)}> */}
+      {/* <div className="container">
+          <h1 className="hero__title">{siteConfig.title}</h1>
+          <p className="hero__subtitle">{siteConfig.tagline}</p> */}
+      {/* <div className={styles.buttons}>
+            <Link to={useBaseUrl("docs/")}>Get Started</Link>
+          </div> */}
+      {/* </div> */}
+      {/* </header> */}
+      <main>
+        {features && features.length > 0 && (
+          <section className={styles.features}>
+            <div className="container">
+              <div className="row">
+                {features.map((props, idx) => (
+                  <Feature key={idx} {...props} />
+                ))}
+              </div>
+            </div>
+          </section>
+        )}
+      </main>
+    </Layout>
+ ); +} + +export default Home; diff --git a/docs/src/pages/styles.module.css b/docs/src/pages/styles.module.css new file mode 100644 index 0000000000..c1aa85121c --- /dev/null +++ b/docs/src/pages/styles.module.css @@ -0,0 +1,37 @@ +/* stylelint-disable docusaurus/copyright-header */ + +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 966px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} + +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; +} + +.featureImage { + height: 200px; + width: 200px; +} diff --git a/docs/src/paper-wallet/README.md b/docs/src/paper-wallet/README.md index 0fe9bc94b0..f26dd74d1c 100644 --- a/docs/src/paper-wallet/README.md +++ b/docs/src/paper-wallet/README.md @@ -1,12 +1,11 @@ -# Paper Wallet +--- +title: Paper Wallet +--- This document describes how to create and use a paper wallet with the Solana CLI tools. -{% hint style="info" %} -We do not intend to advise on how to *securely* create or manage paper wallets. -Please research the security concerns carefully. -{% endhint %} +> We do not intend to advise on how to _securely_ create or manage paper wallets. Please research the security concerns carefully. ## Overview @@ -17,4 +16,4 @@ support keypair input via seed phrases. To learn more about the BIP39 standard, visit the Bitcoin BIPs Github repository [here](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki). -{% page-ref page="usage.md" %} +[Usage](paper-wallet-usage.md) diff --git a/docs/src/paper-wallet/paper-wallet-usage.md b/docs/src/paper-wallet/paper-wallet-usage.md index a8eb845f07..2406661827 100644 --- a/docs/src/paper-wallet/paper-wallet-usage.md +++ b/docs/src/paper-wallet/paper-wallet-usage.md @@ -1,14 +1,12 @@ -# Paper Wallet Usage +--- +title: Paper Wallet Usage +--- Solana commands can be run without ever saving a keypair to disk on a machine. If avoiding writing a private key to disk is a security concern of yours, you've come to the right place. -{% hint style="warning" %} -Even using this secure input method, it's still possible that a private key gets -written to disk by unencrypted memory swaps. It is the user's responsibility to -protect against this scenario. -{% endhint %} +> Even using this secure input method, it's still possible that a private key gets written to disk by unencrypted memory swaps. It is the user's responsibility to protect against this scenario. ## Before You Begin @@ -30,10 +28,7 @@ The seed phrase and passphrase can be used together as a paper wallet. As long as you keep your seed phrase and passphrase stored safely, you can use them to access your account. -{% hint style="info" %} -For more information about how seed phrases work, review this -[Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase). -{% endhint %} +> For more information about how seed phrases work, review this [Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase). ### Seed Phrase Generation @@ -50,26 +45,20 @@ have not made any errors. 
solana-keygen new --no-outfile ``` -{% hint style="warning" %} -If the `--no-outfile` flag is **omitted**, the default behavior is to write the -keypair to `~/.config/solana/id.json`, resulting in a -[file system wallet](../file-system-wallet/README.md) -{% endhint %} +> If the `--no-outfile` flag is **omitted**, the default behavior is to write the keypair to `~/.config/solana/id.json`, resulting in a [file system wallet](../file-system-wallet/README.md) The output of this command will display a line like this: + ```bash pubkey: 9ZNTfG4NyQgxy2SWjSiQoUyBPEvXT2xo7fKc5hPYYJ7b ``` -The value shown after `pubkey:` is your *wallet address*. +The value shown after `pubkey:` is your _wallet address_. **Note:** In working with paper wallets and file system wallets, the terms "pubkey" and "wallet address" are sometimes used interchangably. -{% hint style="info" %} -For added security, increase the seed phrase word count using the `--word-count` -argument -{% endhint %} +> For added security, increase the seed phrase word count using the `--word-count` argument For full usage details run: @@ -88,10 +77,7 @@ through entering your seed phrase and a passphrase if you chose to use one. solana-keygen pubkey ASK ``` -{% hint style="info" %} -Note that you could potentially use different passphrases for the same seed -phrase. Each unique passphrase will yield a different keypair. -{% endhint %} +> Note that you could potentially use different passphrases for the same seed phrase. Each unique passphrase will yield a different keypair. The `solana-keygen` tool uses the same BIP39 standard English word list as it does to generate seed phrases. If your seed phrase was generated with another @@ -104,17 +90,12 @@ solana-keygen pubkey ASK --skip-seed-phrase-validation ``` After entering your seed phrase with `solana-keygen pubkey ASK` the console -will display a string of base-58 character. This is the *wallet address* +will display a string of base-58 character. This is the _wallet address_ associated with your seed phrase. -{% hint style="info" %} -Copy the derived address to a USB stick for easy usage on networked computers -{% endhint %} +> Copy the derived address to a USB stick for easy usage on networked computers -{% hint style="info" %} -A common next step is to [check the balance](#checking-account-balance) of the -account associated with a public key -{% endhint %} +> A common next step is to [check the balance](#checking-account-balance) of the account associated with a public key For full usage details run: @@ -142,7 +123,7 @@ keypair generated from your seed phrase, and "Failed" otherwise. All that is needed to check an account balance is the public key of an account. To retrieve public keys securely from a paper wallet, follow the [Public Key Derivation](#public-key-derivation) instructions on an -[air gapped computer](https://en.wikipedia.org/wiki/Air_gap_\(networking\)). +[air gapped computer](). Public keys can then be typed manually or transferred via a USB stick to a networked machine. @@ -160,7 +141,8 @@ solana balance ``` ## Creating Multiple Paper Wallet Addresses -You can create as many wallet addresses as you like. Simply re-run the + +You can create as many wallet addresses as you like. Simply re-run the steps in [Seed Phrase Generation](#seed-phrase-generation) or [Public Key Derivation](#public-key-derivation) to create a new address. 
Multiple wallet addresses can be useful if you want to transfer tokens between diff --git a/docs/src/proposals/README.md b/docs/src/proposals/README.md index d4d7b5e626..0b42fbe27a 100644 --- a/docs/src/proposals/README.md +++ b/docs/src/proposals/README.md @@ -1,3 +1,5 @@ -# Accepted Design Proposals +--- +title: Accepted Design Proposals +--- The following architectural proposals have been accepted by the Solana team, but are not yet fully implemented. The proposals may be implemented as described, implemented differently as issues in the designs become evident, or not implemented at all. If implemented, the proposal will be moved to [Implemented Proposals](../implemented-proposals/README.md) and the details will be added to relevant sections of the docs. diff --git a/docs/src/proposals/bankless-leader.md b/docs/src/proposals/bankless-leader.md index 889266ad2a..ebc08c8881 100644 --- a/docs/src/proposals/bankless-leader.md +++ b/docs/src/proposals/bankless-leader.md @@ -1,10 +1,12 @@ -# Bankless Leader +--- +title: Bankless Leader +--- A bankless leader does the minimum amount of work to produce a valid block. The leader is tasked with ingress transactions, sorting and filtering valid transactions, arranging them into entries, shredding the entries and broadcasting the shreds. While a validator only needs to reassemble the block and replay execution of well formed entries. The leader does 3x more memory operations before any bank execution than the validator per processed transaction. ## Rationale -Normal bank operation for a spend needs to do 2 loads and 2 stores. With this design leader just does 1 load. so 4x less account\_db work before generating the block. The store operations are likely to be more expensive than reads. +Normal bank operation for a spend needs to do 2 loads and 2 stores. With this design leader just does 1 load. so 4x less account_db work before generating the block. The store operations are likely to be more expensive than reads. When replay stage starts processing the same transactions, it can assume that PoH is valid, and that all the entries are safe for parallel execution. The fee accounts that have been loaded to produce the block are likely to still be in memory, so the additional load should be warm and the cost is likely to be amortized. @@ -25,7 +27,7 @@ The balance cache lookups must reference the same base fork for the entire durat Prior to the balance check, the leader validates all the signatures in the transaction. 1. Verify the accounts are not in use and BlockHash is valid. -2. Check if the fee account is present in the cache, or load the account from accounts\_db and store the lamport balance in the cache. +2. Check if the fee account is present in the cache, or load the account from accounts_db and store the lamport balance in the cache. 3. If the balance is less than the fee, drop the transaction. 4. Subtract the fee from the balance. 5. For all the keys in the transaction that are Credit-Debit and are referenced by an instruction, reduce their balance to 0 in the cache. The account fee is declared as Credit-Debit, but as long as it is not used in any instruction its balance will not be reduced to 0. diff --git a/docs/src/proposals/block-confirmation.md b/docs/src/proposals/block-confirmation.md index 0ff0203af4..c973950a50 100644 --- a/docs/src/proposals/block-confirmation.md +++ b/docs/src/proposals/block-confirmation.md @@ -1,4 +1,6 @@ -# Block Confirmation +--- +title: Block Confirmation +--- A validator votes on a PoH hash for two purposes. 
First, the vote indicates it believes the ledger is valid up until that point in time. Second, since many @@ -14,16 +16,16 @@ height of the block it is voting on. The account stores the 32 highest heights. ### Problems -* Only the validator knows how to find its own votes directly. +- Only the validator knows how to find its own votes directly. Other components, such as the one that calculates confirmation time, needs to be baked into the validator code. The validator code queries the bank for all accounts owned by the vote program. -* Voting ballots do not contain a PoH hash. The validator is only voting that +- Voting ballots do not contain a PoH hash. The validator is only voting that it has observed an arbitrary block at some height. -* Voting ballots do not contain a hash of the bank state. Without that hash, +- Voting ballots do not contain a hash of the bank state. Without that hash, there is no evidence that the validator executed the transactions and verified there were no double spends. @@ -50,8 +52,8 @@ log the time since the NewBlock transaction was submitted. ### Finality and Payouts -[Tower BFT](../implemented-proposals/tower-bft.md) is the proposed fork selection algorithm. It proposes -that payment to miners be postponed until the *stack* of validator votes reaches +[Tower BFT](../implemented-proposals/tower-bft.md) is the proposed fork selection algorithm. It proposes +that payment to miners be postponed until the _stack_ of validator votes reaches a certain depth, at which point rollback is not economically feasible. The vote program may therefore implement Tower BFT. Vote instructions would need to reference a global Tower account so that it can track cross-block state. @@ -62,7 +64,7 @@ reference a global Tower account so that it can track cross-block state. Using programs and accounts to implement this is a bit tedious. The hardest part is figuring out how much space to allocate in NewBlock. The two variables -are the *active set* and the stakes of those validators. If we calculate the +are the _active set_ and the stakes of those validators. If we calculate the active set at the time NewBlock is submitted, the number of validators to allocate space for is known upfront. If, however, we allow new validators to vote on old blocks, then we'd need a way to allocate space dynamically. diff --git a/docs/src/proposals/cluster-test-framework.md b/docs/src/proposals/cluster-test-framework.md index 076ea45976..70717e9ff8 100644 --- a/docs/src/proposals/cluster-test-framework.md +++ b/docs/src/proposals/cluster-test-framework.md @@ -1,4 +1,6 @@ -# Cluster Test Framework +--- +title: Cluster Test Framework +--- This document proposes the Cluster Test Framework \(CTF\). CTF is a test harness that allows tests to execute against a local, in-process cluster or a deployed cluster. 
@@ -99,4 +101,3 @@ pub fn test_large_invalid_gossip_nodes( verify_spends(&cluster); } ``` - diff --git a/docs/src/proposals/interchain-transaction-verification.md b/docs/src/proposals/interchain-transaction-verification.md index 58fb86670a..add6e04162 100644 --- a/docs/src/proposals/interchain-transaction-verification.md +++ b/docs/src/proposals/interchain-transaction-verification.md @@ -1,12 +1,14 @@ -# Inter-chain Transaction Verification +--- +title: Inter-chain Transaction Verification +--- ## Problem Inter-chain applications are not new to the digital asset ecosystem; in fact, even the smaller centralized exchanges still categorically dwarf all single chain applications put together in terms of users and volume. They command massive valuations and have spent years effectively optimizing their core products for a broad range of end users. However, their basic operations center around mechanisms that require their users to unilaterally trust them, typically with little to no recourse or protection from accidental loss. This has led to the broader digital asset ecosystem being fractured along network lines because interoperability solutions typically: -* Are technically complex to fully implement -* Create unstable network scale incentive structures -* Require consistent and high level cooperation between stakeholders +- Are technically complex to fully implement +- Create unstable network scale incentive structures +- Require consistent and high level cooperation between stakeholders ## Proposed Solution @@ -36,9 +38,9 @@ The Solana Inter-chain SPV mechanism consists of the following components and pa A contract deployed on Solana which statelessly verifies SPV proofs for the caller. It takes as arguments for validation: -* An SPV proof in the correct format of the blockchain associated with the program -* Reference\(s\) to the relevant block headers to compare that proof against -* The necessary parameters of the transaction to verify +- An SPV proof in the correct format of the blockchain associated with the program +- Reference\(s\) to the relevant block headers to compare that proof against +- The necessary parameters of the transaction to verify If the proof in question is successfully validated, the SPV program saves proof @@ -54,9 +56,9 @@ A contract deployed on Solana which statelessly verifies SPV proofs for the call A contract deployed on Solana which coordinates and intermediates the interaction between Clients and Provers and manages the validation of requests, headers, proofs, etc. It is the primary point of access for Client contracts to access the inter-chain. SPV mechanism. 
It offers the following core features: -* Submit Proof Request - allows client to place a request for a specific proof or set of proofs -* Cancel Proof Request - allows client to invalidate a pending request -* Fill Proof Request - used by Provers to submit for validation a proof corresponding to a given Proof Request +- Submit Proof Request - allows client to place a request for a specific proof or set of proofs +- Cancel Proof Request - allows client to invalidate a pending request +- Fill Proof Request - used by Provers to submit for validation a proof corresponding to a given Proof Request The SPV program maintains a publicly available listing of valid pending Proof @@ -90,15 +92,14 @@ An account-based data structure used to maintain block headers for the purpose o Store Headers in program sub-accounts indexed by Public address: -* Each sub-account holds one header and has a public key matching the blockhash -* Requires same number of account data lookups as confirmations per verification -* Limit on number of confirmations \(15-20\) via max transaction data ceiling -* No network-wide duplication of individual headers +- Each sub-account holds one header and has a public key matching the blockhash +- Requires same number of account data lookups as confirmations per verification +- Limit on number of confirmations \(15-20\) via max transaction data ceiling +- No network-wide duplication of individual headers Linked List of multiple sub-accounts storing headers: -* Maintain sequential index of storage accounts, many headers per storage account -* Max 2 account data lookups for >99.9% of verifications \(1 for most\) -* Compact sequential data address format allows any number of confirmations and fast lookups -* Facilitates network-wide header duplication inefficiencies - +- Maintain sequential index of storage accounts, many headers per storage account +- Max 2 account data lookups for >99.9% of verifications \(1 for most\) +- Compact sequential data address format allows any number of confirmations and fast lookups +- Facilitates network-wide header duplication inefficiencies diff --git a/docs/src/proposals/ledger-replication-to-implement.md b/docs/src/proposals/ledger-replication-to-implement.md index 3cd1fc155d..b061c2eb6c 100644 --- a/docs/src/proposals/ledger-replication-to-implement.md +++ b/docs/src/proposals/ledger-replication-to-implement.md @@ -1,4 +1,6 @@ -# Ledger Replication +--- +title: Ledger Replication +--- Note: this ledger replication solution was partially implemented, but not completed. The partial implementation was removed by @@ -28,7 +30,7 @@ Archivers are specialized _light clients_. They download a part of the ledger \( We have the following constraints: -* Verification requires generating the CBC blocks. That requires space of 2 +- Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So as @@ -36,7 +38,7 @@ We have the following constraints: identities verified concurrently for the same dataset. -* Validators will randomly sample the set of storage proofs to the set that +- Validators will randomly sample the set of storage proofs to the set that they can handle, and only the creators of those chosen proofs will be @@ -48,31 +50,31 @@ We have the following constraints: ### Constants -1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The +1. SLOTS_PER_SEGMENT: Number of slots in a segment of ledger data. The unit of storage for an archiver. -2. 
NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which archivers +2. NUM_KEY_ROTATION_SEGMENTS: Number of segments after which archivers regenerate their encryption keys and select a new dataset to store. -3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof +3. NUM_STORAGE_PROOFS: Number of storage proofs required for a storage proof claim to be successfully rewarded. -4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage +4. RATIO_OF_FAKE_PROOFS: Ratio of fake proofs to real proofs that a storage mining proof claim has to contain to be valid for a reward. -5. NUM\_STORAGE\_SAMPLES: Number of samples required for a storage mining +5. NUM_STORAGE_SAMPLES: Number of samples required for a storage mining proof. -6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate +6. NUM_CHACHA_ROUNDS: Number of encryption rounds performed to generate encrypted state. -7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or +7. NUM_SLOTS_PER_TURN: Number of slots that define a single storage epoch or a "turn" of the PoRep game. @@ -114,14 +116,14 @@ We have the following constraints: depending on how paranoid an archiver is: - * \(a\) archiver can ask a validator - * \(b\) archiver can ask multiple validators - * \(c\) archiver can ask other archivers - * \(d\) archiver can subscribe to the full transaction stream and generate + - \(a\) archiver can ask a validator + - \(b\) archiver can ask multiple validators + - \(c\) archiver can ask other archivers + - \(d\) archiver can subscribe to the full transaction stream and generate the information itself \(assuming the slot is recent enough\) - * \(e\) archiver can subscribe to an abbreviated transaction stream to + - \(e\) archiver can subscribe to an abbreviated transaction stream to generate the information itself \(assuming the slot is recent enough\) @@ -181,17 +183,17 @@ The Proof of Replication game has 4 primary stages. For each "turn" multiple PoR The 4 stages of the PoRep Game are as follows: 1. Proof submission stage - * Archivers: submit as many proofs as possible during this stage - * Validators: No-op + - Archivers: submit as many proofs as possible during this stage + - Validators: No-op 2. Proof verification stage - * Archivers: No-op - * Validators: Select archivers and verify their proofs from the previous turn + - Archivers: No-op + - Validators: Select archivers and verify their proofs from the previous turn 3. Proof challenge stage - * Archivers: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\) - * Validators: No-op + - Archivers: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\) + - Validators: No-op 4. Reward collection stage - * Archivers: Collect rewards for 3 turns ago - * Validators: Collect rewards for 3 turns ago + - Archivers: Collect rewards for 3 turns ago + - Validators: Collect rewards for 3 turns ago For each turn of the PoRep game, both Validators and Archivers evaluate each stage. The stages are run as separate transactions on the storage program. @@ -207,7 +209,7 @@ For each turn of the PoRep game, both Validators and Archivers evaluate each sta The validator provides an RPC interface to access the this map. Using this API, clients - can map a segment to an archiver's network address \(correlating it via cluster\_info table\). + can map a segment to an archiver's network address \(correlating it via cluster_info table\). 
The clients can then send repair requests to the archiver to retrieve segments. @@ -223,17 +225,17 @@ Our solution to this is to force the clients to continue using the same identity ## Validator attacks -* If a validator approves fake proofs, archiver can easily out them by +- If a validator approves fake proofs, archiver can easily out them by showing the initial state for the hash. -* If a validator marks real proofs as fake, no on-chain computation can be done +- If a validator marks real proofs as fake, no on-chain computation can be done to distinguish who is correct. Rewards would have to rely on the results from multiple validators to catch bad actors and archivers from being denied rewards. -* Validator stealing mining proof results for itself. The proofs are derived +- Validator stealing mining proof results for itself. The proofs are derived from a signature from an archiver, since the validator does not know the @@ -249,29 +251,29 @@ Some percentage of fake proofs are also necessary to receive a reward from stora ## Notes -* We can reduce the costs of verification of PoRep by using PoH, and actually +- We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset. -* We can eliminate grinding by forcing everyone to sign the same PoH hash and +- We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed -* The game between validators and archivers is over random blocks and random +- The game between validators and archivers is over random blocks and random encryption identities and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation. -* Archiver clients fish for lazy validators by submitting fake proofs that +- Archiver clients fish for lazy validators by submitting fake proofs that they can prove are fake. -* To defend against Sybil client identities that try to store the same block we +- To defend against Sybil client identities that try to store the same block we force the clients to store for multiple rounds before receiving a reward. -* Validators should also get rewarded for validating submitted storage proofs +- Validators should also get rewarded for validating submitted storage proofs as incentive for storing the ledger. They can only validate proofs if they @@ -287,35 +289,35 @@ The storage epoch should be the number of slots which results in around 100GB-1T ## Validator behavior -1. Every NUM\_KEY\_ROTATION\_TICKS it also validates samples received from +1. Every NUM_KEY_ROTATION_TICKS it also validates samples received from archivers. It signs the PoH hash at that point and uses the following algorithm with the signature as the input: - * The low 5 bits of the first byte of the signature creates an index into + - The low 5 bits of the first byte of the signature creates an index into another starting byte of the signature. - * The validator then looks at the set of storage proofs where the byte of + - The validator then looks at the set of storage proofs where the byte of the proof's sha state vector starting from the low byte matches exactly with the chosen byte\(s\) of the signature. - * If the set of proofs is larger than the validator can handle, then it + - If the set of proofs is larger than the validator can handle, then it increases to matching 2 bytes in the signature. 
- * Validator continues to increase the number of matching bytes until a + - Validator continues to increase the number of matching bytes until a workable set is found. - * It then creates a mask of valid proofs and fake proofs and sends it to + - It then creates a mask of valid proofs and fake proofs and sends it to the leader. This is a storage proof confirmation transaction. -2. After a lockout period of NUM\_SECONDS\_STORAGE\_LOCKOUT seconds, the +2. After a lockout period of NUM_SECONDS_STORAGE_LOCKOUT seconds, the validator then submits a storage proof claim transaction which then causes the @@ -331,7 +333,7 @@ The storage epoch should be the number of slots which results in around 100GB-1T seed for the hash result. - * A fake proof should consist of an archiver hash of a signature of a PoH + - A fake proof should consist of an archiver hash of a signature of a PoH value. That way when the archiver reveals the fake proof, it can be @@ -362,9 +364,9 @@ SubmitMiningProof { keys = [archiver_keypair] ``` -Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, the sha\_state the result of the archiver using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transfered to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given archiver should only submit proofs for one segment. +Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, the sha_state the result of the archiver using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transfered to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given archiver should only submit proofs for one segment. -The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots in which a significant portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving a AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state. +The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots in which a significant portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS_PER_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving a AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state. The program should do a signature verify check on the signature, public key from the transaction submitter and the message of the previous storage epoch PoH value. 
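Taken together, the slot check and the signature check described above might look roughly like this sketch (types and field names are illustrative, not the on-chain program):

```rust
// Sketch only: check a SubmitMiningProof against the set of valid storage
// mining slots and verify the archiver's signature over the previous storage
// epoch PoH value. Field and parameter names are assumptions.
use std::collections::HashSet;

use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature};

pub struct SubmitMiningProof {
    pub slot: u64,
    pub sha_state: Hash,
    pub signature: Signature,
}

pub fn check_mining_proof(
    proof: &SubmitMiningProof,
    archiver_id: &Pubkey,              // the transaction submitter
    valid_mining_slots: &HashSet<u64>, // maintained via AdvertiseStorageRecentBlockHash
    previous_epoch_poh: &Hash,         // the message the archiver signed
) -> bool {
    // The slot must be one the cluster has rooted deeply enough to mine against
    valid_mining_slots.contains(&proof.slot)
        // ...and the signature must verify against the submitter's pubkey
        && proof
            .signature
            .verify(archiver_id.as_ref(), previous_epoch_poh.as_ref())
}
```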
@@ -379,7 +381,7 @@ keys = [validator_keypair, archiver_keypair(s) (unsigned)] A validator will submit this transaction to indicate that a set of proofs for a given segment are valid/not-valid or skipped where the validator did not look at it. The keypairs for the archivers that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs are generated in the previous epoch. The sampling of the storage proofs should be verified ensuring that the correct proofs are skipped by the validator according to the logic outlined in the validator behavior of sampling. -The included archiver keys will indicate the the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account. +The included archiver keys will indicate the the storage samples which are being referenced; the length of the proof_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account. ### ClaimStorageReward @@ -401,7 +403,7 @@ ChallengeProofValidation { keys = [archiver_keypair, validator_keypair] ``` -This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information maybe via RPC call to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state see that a ProofValidation is submitted in the previous storage epoch and hash the hash\_seed\_value and see that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state. +This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information maybe via RPC call to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state see that a ProofValidation is submitted in the previous storage epoch and hash the hash_seed_value and see that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state. 
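The core of that check is recomputing the hash from the revealed seed and confirming the validator marked the corresponding proof valid. A sketch, with illustrative field names and the `sha2` crate standing in for the hash function:

```rust
// Sketch only: a challenge sticks if the revealed seed hashes to the sha_state
// the archiver originally submitted (proving the proof was fake) and the
// validator nonetheless marked that proof as valid.
use sha2::{Digest, Sha256};

pub struct ChallengeProofValidation {
    pub hash_seed_value: Vec<u8>, // preimage the archiver reveals
    pub proof_index: usize,       // position in the validator's proof mask
}

pub fn challenge_succeeds(
    challenge: &ChallengeProofValidation,
    submitted_sha_state: &[u8; 32], // from the earlier SubmitMiningProof
    proof_mask: &[bool],            // validator's valid/not-valid markings
) -> bool {
    let rehash = Sha256::digest(&challenge.hash_seed_value);
    rehash.as_slice() == &submitted_sha_state[..]
        && proof_mask
            .get(challenge.proof_index)
            .copied()
            .unwrap_or(false)
}
```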
### AdvertiseStorageRecentBlockhash diff --git a/docs/src/proposals/optimistic-confirmation-and-slashing.md b/docs/src/proposals/optimistic-confirmation-and-slashing.md index c3c369b325..13442d137d 100644 --- a/docs/src/proposals/optimistic-confirmation-and-slashing.md +++ b/docs/src/proposals/optimistic-confirmation-and-slashing.md @@ -1,4 +1,6 @@ -# Optimistic Confirmation and Slashing +--- +title: Optimistic Confirmation and Slashing +--- Progress on optimistic confirmation can be tracked here @@ -7,7 +9,7 @@ https://github.com/solana-labs/solana/projects/52 At the end of May, the mainnet-beta is moving to 1.1, and testnet is moving to 1.2. With 1.2, testnet will behave as if it has optimistic finality as long as at least no more than 4.66% of the validators are -acting maliciously. Applications can assume that 2/3+ votes observed in +acting maliciously. Applications can assume that 2/3+ votes observed in gossip confirm a block or that at least 4.66% of the network is violating the protocol. @@ -16,38 +18,37 @@ the protocol. The general idea is that validators must continue voting following their last fork, unless the validator can construct a proof that their current fork may not reach finality. The way validators construct this proof is -by collecting votes for all the forks excluding their own. If the set +by collecting votes for all the forks excluding their own. If the set of valid votes represents over 1/3+X of the epoch stake weight, there may not be a way for the validators current fork to reach 2/3+ finality. The validator hashes the proof (creates a witness) and submits it with -their vote for the alternative fork. But if 2/3+ votes for the same +their vote for the alternative fork. But if 2/3+ votes for the same block, it is impossible for any of the validators to construct this proof, and therefore no validator is able to switch forks and this block will be eventually finalized. - ## Tradeoffs The safety margin is 1/3+X, where X represents the minimum amount of stake that will be slashed in case the protocol is violated. The tradeoff is -that liveness is now reduced by 2X in the worst case. If more than 1/3 - +that liveness is now reduced by 2X in the worst case. If more than 1/3 - 2X of the network is unavailable, the network may stall and will only resume finalizing blocks after the network recovers below 1/3 - 2X of -failing nodes. So far, we haven’t observed a large unavailability hit +failing nodes. So far, we haven’t observed a large unavailability hit on our mainnet, cosmos, or tezos. For our network, which is primarily composed of high availability systems, this seems unlikely. Currently, we have set the threshold percentage to 4.66%, which means that if 23.68% -have failed the network may stop finalizing blocks. For our network, +have failed the network may stop finalizing blocks. For our network, which is primarily composed of high availability systems a 23.68% drop -in availabilty seems unlinkely. 1:10^12 odds assuming five 4.7% staked +in availabilty seems unlinkely. 1:10^12 odds assuming five 4.7% staked nodes with 0.995 of uptime. ## Security Long term average votes per slot has been 670,000,000 votes / 12,000,000 -slots, or 55 out of 64 voting validators. This includes missed blocks due +slots, or 55 out of 64 voting validators. This includes missed blocks due to block producer failures. When a client sees 55/64, or ~86% confirming -a block, it can expect that ~24% or `(86 - 66.666.. + 4.666..)%` of +a block, it can expect that ~24% or `(86 - 66.666.. 
+ 4.666..)%` of the network must be slashed for this block to fail full finalization. ## Why Solana? diff --git a/docs/src/proposals/optimistic_confirmation.md b/docs/src/proposals/optimistic_confirmation.md index e4db65874f..f2b77ea331 100644 --- a/docs/src/proposals/optimistic_confirmation.md +++ b/docs/src/proposals/optimistic_confirmation.md @@ -1,4 +1,6 @@ -# Optimistic Confirmation +--- +title: Optimistic Confirmation +--- ## Primitives @@ -16,11 +18,11 @@ Given a vote `vote(X, S)`, let `S.last == vote.last` be the last slot in `S`. Now we define some "Optimistic Slashing" slashing conditions. The intuition for these is described below: -* `Intuition`: If a validator submits `vote(X, S)`, the same validator -should not have voted on a different fork that "overlaps" this fork. -More concretely, this validator should not have cast another vote -`vote(X', S')` where the range `[X, S.last]` overlaps the range -`[X', S'.last]`, `X != X'`, as shown below: +- `Intuition`: If a validator submits `vote(X, S)`, the same validator + should not have voted on a different fork that "overlaps" this fork. + More concretely, this validator should not have cast another vote + `vote(X', S')` where the range `[X, S.last]` overlaps the range + `[X', S'.last]`, `X != X'`, as shown below: ```text +-------+ @@ -72,7 +74,7 @@ More concretely, this validator should not have cast another vote In the diagram above, note that the vote for `S.last` must have been sent after the vote for `S'.last` (due to lockouts, the higher vote must have been sent -later). Thus, the sequence of votes must have been: `X ... S'.last ... S.last`. +later). Thus, the sequence of votes must have been: `X ... S'.last ... S.last`. This means after the vote on `S'.last`, the validator must have switched back to the other fork at some slot `s > S'.last > X`. Thus, the vote for `S.last` should have used `s` as the "reference" point, not `X`, because that was the @@ -82,21 +84,21 @@ To enforce this, we define the "Optimistic Slashing" slashing conditions. Given any two distinct votes `vote(X, S)`and `vote(X', S')` by the same validator, the votes must satisfy: -* `X <= S.last`, `X' <= S'.last` -* All `s` in `S` are ancestors/descendants of one another, -all `s'` in `S'` are ancsestors/descendants of one another, -* -* `X == X'` implies `S` is parent of `S'` or `S'` is a parent of `S` -* `X' > X` implies `X' > S.last` and `S'.last > S.last` -and for all `s` in `S`, `s + lockout(s) < X'` -* `X > X'` implies `X > S'.last` and `S.last > S'.last` -and for all `s` in `S'`, `s + lockout(s) < X` +- `X <= S.last`, `X' <= S'.last` +- All `s` in `S` are ancestors/descendants of one another, + all `s'` in `S'` are ancsestors/descendants of one another, +- +- `X == X'` implies `S` is parent of `S'` or `S'` is a parent of `S` +- `X' > X` implies `X' > S.last` and `S'.last > S.last` + and for all `s` in `S`, `s + lockout(s) < X'` +- `X > X'` implies `X > S'.last` and `S.last > S'.last` + and for all `s` in `S'`, `s + lockout(s) < X` (The last two rules imply the ranges cannot overlap): Otherwise the validator is slashed. `Range(vote)` - Given a vote `v = vote(X, S)`, define `Range(v)` to be the range - of slots `[X, S.last]`. +of slots `[X, S.last]`. `SP(old_vote, new_vote)` - This is the "Switching Proof" for `old_vote`, the validator's latest vote. Such a proof is necessary anytime a validator switches @@ -113,12 +115,11 @@ The proof is a list of elements `(validator_id, validator_vote(X, S))`, where: 1. 
The sum of the stakes of all the validator id's `> 1/3` 2. For each `(validator_id, validator_vote(X, S))`, there exists some slot `s` -in `S` where: - * a.`s` is not a common ancestor of both `validator_vote.last` and - `old_vote.last` and `new_vote.last`. - * b. `s` is not a descendant of `validator_vote.last`. - * c. `s + s.lockout() >= old_vote.last` (implies validator is still locked - out on slot `s` at slot `old_vote.last`). + in `S` where: + _ a.`s` is not a common ancestor of both `validator_vote.last` and + `old_vote.last` and `new_vote.last`. + _ b. `s` is not a descendant of `validator_vote.last`. \* c. `s + s.lockout() >= old_vote.last` (implies validator is still locked + out on slot `s` at slot `old_vote.last`). Switching forks without a valid switching proof is slashable. @@ -140,12 +141,13 @@ A block `B` that has reached optimistic confirmation will not be reverted unless at least one validator is slashed. ## Proof: + Assume for the sake of contradiction, a block `B` has achieved `optimistic confirmation` at some slot `B + n` for some `n`, and: -* Another block `B'` that is not a parent or descendant of `B` -was finalized. -* No validators violated any slashing conditions. +- Another block `B'` that is not a parent or descendant of `B` + was finalized. +- No validators violated any slashing conditions. By the definition of `optimistic confirmation`, this means `> 2/3` of validators have each shown some vote `v` of the form `Vote(X, S)` where `X <= B <= v.last`. @@ -164,12 +166,13 @@ Because we know from above `X` for all such votes made by `v` is unique, we know there is such a unique `maximal` vote. ### Lemma 1: + `Claim:` Given a vote `Vote(X, S)` made by a validator `V` in the `Optimistic Validators` set, and `S` contains a vote for a slot `s` for which: -* `s + s.lockout > B`, -* `s` is not an ancestor or descendant of `B`, +- `s + s.lockout > B`, +- `s` is not an ancestor or descendant of `B`, then `X > B`. @@ -264,6 +267,7 @@ Since none of these cases are valid, the assumption must have been invalid, and the claim is proven. ### Lemma 2: + Recall `B'` was the block finalized on a different fork than "optimistically" confirmed" block `B`. @@ -308,13 +312,13 @@ true that `B' > X` ``` `Proof`: Let `Vote(X, S)` be a vote in the `Optimistic Votes` set. Then by -definition, given the "optimistcally confirmed" block `B`, `X <= B <= S.last`. +definition, given the "optimistcally confirmed" block `B`, `X <= B <= S.last`. Because `X` is a parent of `B`, and `B'` is not a parent or ancestor of `B`, then: -* `B' != X` -* `B'` is not a parent of `X` +- `B' != X` +- `B'` is not a parent of `X` Now consider if `B'` < `X`: @@ -324,16 +328,17 @@ and `B'` is not a parent of `X`, then the validator should not have been able to vote on the higher slot `X` that does not descend from `B'`. ### Proof of Safety: + We now aim to show at least one of the validators in the `Optimistic Validators` set violated a slashing rule. First note that in order for `B'` to have been rooted, there must have been -`> 2/3` stake that voted on `B'` or a descendant of `B'`. Given that the +`> 2/3` stake that voted on `B'` or a descendant of `B'`. Given that the `Optimistic Validator` set also contains `> 2/3` of the staked validators, it follows that `> 1/3` of the staked validators: -* Rooted `B'` or a descendant of `B'` -* Also submitted a vote `v` of the form `Vote(X, S)` where `X <= B <= v.last`. 
+- Rooted `B'` or a descendant of `B'` +- Also submitted a vote `v` of the form `Vote(X, S)` where `X <= B <= v.last`. Let the `Delinquent` set be the set of validators that meet the above criteria. @@ -341,10 +346,10 @@ criteria. By definition, in order to root `B'`, each validator `V` in `Delinquent` must have each made some "switching vote" of the form `Vote(X_v, S_v)` where: -* `S_v.last > B'` -* `S_v.last` is a descendant of `B'`, so it can't be a descendant of `B` -* Because `S_v.last` is not a descendant of `B`, then `X_v` cannot be a -descendant or ancestor of `B`. +- `S_v.last > B'` +- `S_v.last` is a descendant of `B'`, so it can't be a descendant of `B` +- Because `S_v.last` is not a descendant of `B`, then `X_v` cannot be a + descendant or ancestor of `B`. By definition, this delinquent validator `V` also made some vote `Vote(X, S)` in the `Optimistic Votes` where by definition of that set (optimistically @@ -377,19 +382,20 @@ fact that the set of validators in the `Optimistic Voters` set consists of a switching proof),`Vote(X_w, S_w)` that was included in validator `V`'s switching proof for slot `X'`, where `S_w` contains a slot `s` such that: -* `s` is not a common ancestor of `S.last` and `X'` -* `s` is not a descendant of `S.last`. -* `s' + s'.lockout > S.last` +- `s` is not a common ancestor of `S.last` and `X'` +- `s` is not a descendant of `S.last`. +- `s' + s'.lockout > S.last` Because `B` is an ancestor of `S.last`, it is also true then: -* `s` is not a common ancestor of `B` and `X'` -* `s' + s'.lockout > B` + +- `s` is not a common ancestor of `B` and `X'` +- `s' + s'.lockout > B` which was included in `V`'s switching proof. Now because `W` is also a member of `Optimistic Voters`, then by the `Lemma 1` above, given a vote by `W`, `Vote(X_w, S_w)`, where `S_w` contains a vote for -a slot `s` where `s + s.lockout > B`, and `s` is not an ancestor of `B`, then +a slot `s` where `s + s.lockout > B`, and `s` is not an ancestor of `B`, then `X_w > B`. Because validator `V` included vote `Vote(X_w, S_w)` in its proof of switching @@ -399,4 +405,4 @@ for slot `X'`, then his implies validator `V'` submitted vote `Vote(X_w, S_w)` But this is a contradiction because we chose `Vote(X', S')` to be the first vote made by any validator in the `Optimistic Voters` set where `X' > B` and `X'` is -not a descendant of `B`. \ No newline at end of file +not a descendant of `B`. diff --git a/docs/src/proposals/rust-clients.md b/docs/src/proposals/rust-clients.md index 850d7f85e9..c1057c0cdd 100644 --- a/docs/src/proposals/rust-clients.md +++ b/docs/src/proposals/rust-clients.md @@ -1,9 +1,11 @@ -# Rust Clients +--- +title: Rust Clients +--- ## Problem High-level tests, such as bench-tps, are written in terms of the `Client` -trait. When we execute these tests as part of the test suite, we use the +trait. When we execute these tests as part of the test suite, we use the low-level `BankClient` implementation. When we need to run the same test against a cluster, we use the `ThinClient` implementation. The problem with that approach is that it means the trait will continually expand to include new @@ -24,7 +26,7 @@ of `Client`. We would then add a new implementation of `Client`, called `ThinClient` currently resides. After this reorg, any code needing a client would be written in terms of -`ThinClient`. In unit tests, the functionality would be invoked with +`ThinClient`. 
In unit tests, the functionality would be invoked with `ThinClient`, whereas `main()` functions, benchmarks and integration tests would invoke it with `ThinClient`. @@ -46,7 +48,7 @@ that the `Custom(String)` field should be changed to `Custom(Box)`. `RpcClientTng` and an `AsyncClient` implementation 4. Move all unit-tests from `BankClient` to `ThinClientTng` 5. Add `ClusterClient` -5. Move `ThinClient` users to `ThinClientTng` -6. Delete `ThinClient` and rename `ThinClientTng` to `ThinClient` -7. Move `RpcClient` users to new `ThinClient` -8. Delete `RpcClient` and rename `RpcClientTng` to `RpcClient` +6. Move `ThinClient` users to `ThinClientTng` +7. Delete `ThinClient` and rename `ThinClientTng` to `ThinClient` +8. Move `RpcClient` users to new `ThinClient` +9. Delete `RpcClient` and rename `RpcClientTng` to `RpcClient` diff --git a/docs/src/proposals/simple-payment-and-state-verification.md b/docs/src/proposals/simple-payment-and-state-verification.md index 330b95df32..f662dfffd7 100644 --- a/docs/src/proposals/simple-payment-and-state-verification.md +++ b/docs/src/proposals/simple-payment-and-state-verification.md @@ -1,4 +1,6 @@ -# Simple Payment and State Verification +--- +title: Simple Payment and State Verification +--- It is often useful to allow low resourced clients to participate in a Solana cluster. Be this participation economic or contract execution, verification @@ -67,11 +69,11 @@ sorted by signature. A Block-Merkle is the Merkle Root of all the Entry-Merkles sequenced in the block. -![Block Merkle Diagram](../.gitbook/assets/spv-block-merkle.svg) +![Block Merkle Diagram](/img/spv-block-merkle.svg) A Bank-Hash is the hash of the concatenation of the Block-Merkle and Accounts-Hash -![Bank Hash Diagram](../.gitbook/assets/spv-bank-hash.svg) +![Bank Hash Diagram](/img/spv-bank-hash.svg) An Accounts-Hash is the hash of the concatentation of the state hashes of each account modified during the current slot. @@ -86,7 +88,7 @@ code, but a single status bit to indicate the transaction's success. ### Account State Verification An account's state (balance or other data) can be verified by submitting a -transaction with a ___TBD___ Instruction to the cluster. The client can then +transaction with a **_TBD_** Instruction to the cluster. The client can then use a [Transaction Inclusion Proof](#transaction-inclusion-proof) to verify whether the cluster agrees that the acount has reached the expected state. @@ -102,13 +104,13 @@ of consecutive validation votes. It contains the following: -* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Hash +- Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Hash And a vector of PoH entries: -* Validator vote entries -* Ticks -* Light entries +- Validator vote entries +- Ticks +- Light entries ```text /// This Entry definition skips over the transactions and only contains the @@ -148,8 +150,8 @@ generated state. For example: -* Epoch validator accounts and their stakes and weights. -* Computed fee rates +- Epoch validator accounts and their stakes and weights. +- Computed fee rates These values should have an entry in the Bank-Hash. They should live under known accounts, and therefore have an index into the hash concatenation. 
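As a rough illustration of the proof structure above, the sketch below walks a Merkle path from a leaf hash up to an expected root and recomputes a Bank-Hash as the hash of the Block-Merkle concatenated with the Accounts-Hash. It assumes the `sha2` crate and a generic (sibling, sibling-is-left) path encoding; the implementation's actual leaf encoding and hashing details may differ.

```rust
use sha2::{Digest, Sha256};

fn hash_pair(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

/// Walk a Merkle path from a leaf hash to an expected root.
/// Each path element is (sibling_hash, sibling_is_left).
fn verify_merkle_path(leaf: [u8; 32], path: &[([u8; 32], bool)], root: &[u8; 32]) -> bool {
    let mut node = leaf;
    for (sibling, sibling_is_left) in path {
        node = if *sibling_is_left {
            hash_pair(sibling, &node)
        } else {
            hash_pair(&node, sibling)
        };
    }
    &node == root
}

/// A Bank-Hash is the hash of the concatenation of the Block-Merkle and the
/// Accounts-Hash, so a client can check it once both roots are known.
fn bank_hash(block_merkle: &[u8; 32], accounts_hash: &[u8; 32]) -> [u8; 32] {
    hash_pair(block_merkle, accounts_hash)
}
```

In this sketch a light client would verify the transaction's path into its Entry-Merkle, the Entry-Merkle's path into the Block-Merkle, and finally check the resulting Bank-Hash against the one validators voted on.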
diff --git a/docs/src/proposals/slashing.md b/docs/src/proposals/slashing.md index 23b74ba464..10d4fe988f 100644 --- a/docs/src/proposals/slashing.md +++ b/docs/src/proposals/slashing.md @@ -1,4 +1,6 @@ -# Slashing rules +--- +title: Slashing rules +--- Unlike Proof of Work \(PoW\) where off-chain capital expenses are already deployed at the time of block construction/voting, PoS systems require @@ -28,12 +30,12 @@ In addition to the functional form lockout described above, early implementation may be a numerical approximation based on a First In, First Out \(FIFO\) data structure and the following logic: -* FIFO queue holding 32 votes per active validator -* new votes are pushed on top of queue \(`push_front`\) -* expired votes are popped off top \(`pop_front`\) -* as votes are pushed into the queue, the lockout of each queued vote doubles -* votes are removed from back of queue if `queue.len() > 32` -* the earliest and latest height that has been removed from the back of the +- FIFO queue holding 32 votes per active validator +- new votes are pushed on top of queue \(`push_front`\) +- expired votes are popped off top \(`pop_front`\) +- as votes are pushed into the queue, the lockout of each queued vote doubles +- votes are removed from back of queue if `queue.len() > 32` +- the earliest and latest height that has been removed from the back of the queue should be stored It is likely that a reward will be offered as a % of the slashed amount to any diff --git a/docs/src/proposals/snapshot-verification.md b/docs/src/proposals/snapshot-verification.md index 636da4c87b..8955fb51ed 100644 --- a/docs/src/proposals/snapshot-verification.md +++ b/docs/src/proposals/snapshot-verification.md @@ -1,4 +1,6 @@ -# Snapshot Verification +--- +title: Snapshot Verification +--- ## Problem diff --git a/docs/src/proposals/tick-verification.md b/docs/src/proposals/tick-verification.md index 345d81bf16..f81c5847cb 100644 --- a/docs/src/proposals/tick-verification.md +++ b/docs/src/proposals/tick-verification.md @@ -1,4 +1,6 @@ -# Tick Verification +--- +title: Tick Verification +--- This design the criteria and validation of ticks in a slot. It also describes error handling and slashing conditions encompassing how the system handles @@ -7,8 +9,8 @@ transmissions that do not meet these requirements. # Slot structure Each slot must contain an expected `ticks_per_slot` number of ticks. The last -shred in a slot must contain only the entirety of the last tick, and nothing -else. The leader must also mark this shred containing the last tick with the +shred in a slot must contain only the entirety of the last tick, and nothing +else. The leader must also mark this shred containing the last tick with the `LAST_SHRED_IN_SLOT` flag. Between ticks, there must be `hashes_per_tick` number of hashes. @@ -16,54 +18,53 @@ number of hashes. Malicious transmissions `T` are handled in two ways: -1) If a leader can generate some erronenous transmission `T` and also some -alternate transmission `T'` for the same slot without violating any slashing -rules for duplicate transmissions (for instance if `T'` is a subset of `T`), -then the cluster must handle the possibility of both transmissions being live. +1. If a leader can generate some erronenous transmission `T` and also some + alternate transmission `T'` for the same slot without violating any slashing + rules for duplicate transmissions (for instance if `T'` is a subset of `T`), + then the cluster must handle the possibility of both transmissions being live. 
Thus this means we cannot mark the erronenous transmission `T` as dead because -the cluster may have reached consensus on `T'`. These cases necessitate a +the cluster may have reached consensus on `T'`. These cases necessitate a slashing proof to punish this bad behavior. -2) Otherwise, we can simply mark the slot as dead and not playable. A slashing -proof may or may not be necessary depending on feasibility. +2. Otherwise, we can simply mark the slot as dead and not playable. A slashing + proof may or may not be necessary depending on feasibility. # Blockstore receiving shreds When blockstore receives a new shred `s`, there are two cases: -1) `s` is marked as `LAST_SHRED_IN_SLOT`, then check if there exists a shred -`s'` in blockstore for that slot where `s'.index > s.index` If so, together `s` -and `s'` constitute a slashing proof. +1. `s` is marked as `LAST_SHRED_IN_SLOT`, then check if there exists a shred + `s'` in blockstore for that slot where `s'.index > s.index` If so, together `s` + and `s'` constitute a slashing proof. -2) Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT` -with index `i`. If `s.index > i`, then together `s` and `s'`constitute a -slashing proof. In this case, blockstore will also not insert `s`. - -3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for -the same index are a slashable condition. Details for this case are covered -in the `Leader Duplicate Block Slashing` section. +2. Blockstore has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT` + with index `i`. If `s.index > i`, then together `s` and `s'`constitute a + slashing proof. In this case, blockstore will also not insert `s`. +3. Duplicate shreds for the same index are ignored. Non-duplicate shreds for + the same index are a slashable condition. Details for this case are covered + in the `Leader Duplicate Block Slashing` section. # Replaying and validating ticks -1) Replay stage replays entries from blockstore, keeping track of the number of -ticks it has seen per slot, and verifying there are `hashes_per_tick` number of -hashes between ticcks. After the tick from this last shred has been played, -replay stage then checks the total number of ticks. +1. Replay stage replays entries from blockstore, keeping track of the number of + ticks it has seen per slot, and verifying there are `hashes_per_tick` number of + hashes between ticcks. After the tick from this last shred has been played, + replay stage then checks the total number of ticks. Failure scenario 1: If ever there are two consecutive ticks between which the number of hashes is `!= hashes_per_tick`, mark this slot as dead. -Failure scenario 2: If the number of ticks != `ticks_per_slot`, mark slot as -dead. +Failure scenario 2: If the number of ticks != `ticks_per_slot`, mark slot as +dead. Failure scenario 3: If the number of ticks reaches `ticks_per_slot`, but we still -haven't seen the `LAST_SHRED_IN_SLOT`, mark this slot as dead. +haven't seen the `LAST_SHRED_IN_SLOT`, mark this slot as dead. -2) When ReplayStage reaches a shred marked as the last shred, it checks if this -last shred is a tick. +2. When ReplayStage reaches a shred marked as the last shred, it checks if this + last shred is a tick. Failure scenario: If the signed shred with the `LAST_SHRED_IN_SLOT` flag cannot -be deserialized into a tick (either fails to deserialize or deserializes into -an entry), mark this slot as dead. 
\ No newline at end of file +be deserialized into a tick (either fails to deserialize or deserializes into +an entry), mark this slot as dead. diff --git a/docs/src/proposals/validator-proposal.md b/docs/src/proposals/validator-proposal.md index ead91e2f3d..74bdfe3a06 100644 --- a/docs/src/proposals/validator-proposal.md +++ b/docs/src/proposals/validator-proposal.md @@ -1,4 +1,6 @@ -# Validator +--- +title: Validator +--- ## History @@ -36,17 +38,17 @@ We unwrap the many abstraction layers and build a single pipeline that can toggle leader mode on whenever the validator's ID shows up in the leader schedule. -![Validator block diagram](../.gitbook/assets/validator-proposal.svg) +![Validator block diagram](/img/validator-proposal.svg) ## Notable changes -* Hoist FetchStage and BroadcastStage out of TPU -* BankForks renamed to Banktree -* TPU moves to new socket-free crate called solana-tpu. -* TPU's BankingStage absorbs ReplayStage -* TVU goes away -* New RepairStage absorbs Shred Fetch Stage and repair requests -* JSON RPC Service is optional - used for debugging. It should instead be part +- Hoist FetchStage and BroadcastStage out of TPU +- BankForks renamed to Banktree +- TPU moves to new socket-free crate called solana-tpu. +- TPU's BankingStage absorbs ReplayStage +- TVU goes away +- New RepairStage absorbs Shred Fetch Stage and repair requests +- JSON RPC Service is optional - used for debugging. It should instead be part of a separate `solana-blockstreamer` executable. -* New MulticastStage absorbs retransmit part of RetransmitStage -* MulticastStage downstream of Blockstore +- New MulticastStage absorbs retransmit part of RetransmitStage +- MulticastStage downstream of Blockstore diff --git a/docs/src/proposals/vote-signing-to-implement.md b/docs/src/proposals/vote-signing-to-implement.md index 3ca9f31ec0..52f32d6ab2 100644 --- a/docs/src/proposals/vote-signing-to-implement.md +++ b/docs/src/proposals/vote-signing-to-implement.md @@ -1,4 +1,6 @@ -# Secure Vote Signing +--- +title: Secure Vote Signing +--- ## Secure Vote Signing @@ -11,21 +13,25 @@ The following sections outline how this architecture would work: ### Message Flow 1. The node initializes the enclave at startup - * The enclave generates an asymmetric key and returns the public key to the + + - The enclave generates an asymmetric key and returns the public key to the node - * The keypair is ephemeral. A new keypair is generated on node bootup. A + - The keypair is ephemeral. A new keypair is generated on node bootup. A new keypair might also be generated at runtime based on some to be determined criteria. - * The enclave returns its attestation report to the node + - The enclave returns its attestation report to the node + 2. The node performs attestation of the enclave \(e.g using Intel's IAS APIs\) - * The node ensures that the Secure Enclave is running on a TPM and is + + - The node ensures that the Secure Enclave is running on a TPM and is signed by a trusted party + 3. The stakeholder of the node grants ephemeral key permission to use its stake. This process is to be determined. @@ -34,13 +40,13 @@ The following sections outline how this architecture would work: using its interface to sign transactions and other data. - * In case of vote signing, the node needs to verify the PoH. The PoH + - In case of vote signing, the node needs to verify the PoH. The PoH verification is an integral part of signing. The enclave would be presented with some verifiable data to check before signing the vote. 
- * The process of generating the verifiable data in untrusted space is to be determined + - The process of generating the verifiable data in untrusted space is to be determined ### PoH Verification @@ -54,7 +60,7 @@ The following sections outline how this architecture would work: a fork that does not contain `X` increases\). - * The lockout period for `X+y` is still `N` until the node votes again. + - The lockout period for `X+y` is still `N` until the node votes again. 3. The lockout period increment is capped \(e.g. factor `F` applies maximum 32 @@ -64,21 +70,21 @@ The following sections outline how this architecture would work: means - * Enclave is initialized with `N`, `F` and `Factor cap` - * Enclave stores `Factor cap` number of entry IDs on which the node had + - Enclave is initialized with `N`, `F` and `Factor cap` + - Enclave stores `Factor cap` number of entry IDs on which the node had previously voted - * The sign request contains the entry ID for the new vote - * Enclave verifies that new vote's entry ID is on the correct fork + - The sign request contains the entry ID for the new vote + - Enclave verifies that new vote's entry ID is on the correct fork \(following the rules \#1 and \#2 above\) ### Ancestor Verification -This is alternate, albeit, less certain approach to verifying voting fork. 1. The validator maintains an active set of nodes in the cluster 2. It observes the votes from the active set in the last voting period 3. It stores the ancestor/last\_tick at which each node voted 4. It sends new vote request to vote-signing service +This is alternate, albeit, less certain approach to verifying voting fork. 1. The validator maintains an active set of nodes in the cluster 2. It observes the votes from the active set in the last voting period 3. It stores the ancestor/last_tick at which each node voted 4. It sends new vote request to vote-signing service -* It includes previous votes from nodes in the active set, and their +- It includes previous votes from nodes in the active set, and their corresponding ancestors @@ -86,8 +92,8 @@ This is alternate, albeit, less certain approach to verifying voting fork. 1. Th and the vote ancestor matches with majority of the nodes -* It signs the new vote if the check is successful -* It asserts \(raises an alarm of some sort\) if the check is unsuccessful +- It signs the new vote if the check is successful +- It asserts \(raises an alarm of some sort\) if the check is unsuccessful The premise is that the validator can be spoofed at most once to vote on incorrect data. If someone hijacks the validator and submits a vote request for bogus data, that vote will not be included in the PoH \(as it'll be rejected by the cluster\). The next time the validator sends a request to sign the vote, the signing service will detect that validator's last vote is missing \(as part of @@ -108,4 +114,3 @@ A staking client should be configurable to prevent voting on inactive forks. Thi enclave. 2. Need infrastructure for granting stake to an ephemeral key. - diff --git a/docs/src/running-validator/README.md b/docs/src/running-validator/README.md index c8dff594d7..358e681dc6 100644 --- a/docs/src/running-validator/README.md +++ b/docs/src/running-validator/README.md @@ -1,4 +1,6 @@ -# Running a Validator +--- +title: Running a Validator +--- This section describes how run a Solana validator node. 
diff --git a/docs/src/running-validator/validator-info.md b/docs/src/running-validator/validator-info.md index bbea6804c4..9cb2a0caeb 100644 --- a/docs/src/running-validator/validator-info.md +++ b/docs/src/running-validator/validator-info.md @@ -1,4 +1,6 @@ -# Publishing Validator Info +--- +title: Publishing Validator Info +--- You can publish your validator information to the chain to be publicly visible to other users. @@ -10,7 +12,7 @@ Run the solana CLI to populate a validator info account: solana validator-info publish --keypair ~/validator-keypair.json ``` -For details about optional fields for VALIDATOR\_INFO\_ARGS: +For details about optional fields for VALIDATOR_INFO_ARGS: ```bash solana validator-info publish --help @@ -47,14 +49,16 @@ pubkey with Keybase: 1. Join [https://keybase.io/](https://keybase.io/) and complete the profile for your validator 2. Add your validator **identity pubkey** to Keybase: - * Create an empty file on your local computer called `validator-` - * In Keybase, navigate to the Files section, and upload your pubkey file to + + - Create an empty file on your local computer called `validator-` + - In Keybase, navigate to the Files section, and upload your pubkey file to a `solana` subdirectory in your public folder: `/keybase/public//solana` - * To check your pubkey, ensure you can successfully browse to + - To check your pubkey, ensure you can successfully browse to `https://keybase.pub//solana/validator-` + 3. Add or update your `solana validator-info` with your Keybase username. The CLI will verify the `validator-` file diff --git a/docs/src/running-validator/validator-monitor.md b/docs/src/running-validator/validator-monitor.md index 5fb56f6234..13e67caf2d 100644 --- a/docs/src/running-validator/validator-monitor.md +++ b/docs/src/running-validator/validator-monitor.md @@ -1,4 +1,6 @@ -# Monitoring a Validator +--- +title: Monitoring a Validator +--- ## Check Gossip @@ -44,7 +46,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://devnet.solana.com ``` - ## Validator Metrics Metrics are available for local monitoring of your validator. diff --git a/docs/src/running-validator/validator-reqs.md b/docs/src/running-validator/validator-reqs.md index 5dec07202d..e3cdcbe348 100644 --- a/docs/src/running-validator/validator-reqs.md +++ b/docs/src/running-validator/validator-reqs.md @@ -1,46 +1,48 @@ -# Validator Requirements +--- +title: Validator Requirements +--- ## Hardware -* CPU Recommendations - * We recommend a CPU with the highest number of cores as possible. AMD Threadripper or Intel Server \(Xeon\) CPUs are fine. - * We recommend AMD Threadripper as you get a larger number of cores for parallelization compared to Intel. - * Threadripper also has a cost-per-core advantage and a greater number of PCIe lanes compared to the equivalent Intel part. PoH \(Proof of History\) is based on sha256 and Threadripper also supports sha256 hardware instructions. -* SSD size and I/O style \(SATA vs NVMe/M.2\) for a validator - * Minimum example - Samsung 860 Evo 2TB - * Mid-range example - Samsung 860 Evo 4TB - * High-end example - Samsung 860 Evo 4TB -* GPUs - * While a CPU-only node may be able to keep up with the initial idling network, once transaction throughput increases, GPUs will be necessary - * What kind of GPU? - * We recommend Nvidia 2080Ti or 1080Ti series consumer GPU or Tesla series server GPUs. 
- * We do not currently support OpenCL and therefore do not support AMD GPUs. We have a bounty out for someone to port us to OpenCL. Interested? [Check out our GitHub.](https://github.com/solana-labs/solana) -* Power Consumption - * Approximate power consumption for a validator node running an AMD Threadripper 2950W and 2x 2080Ti GPUs is 800-1000W. +- CPU Recommendations + - We recommend a CPU with the highest number of cores as possible. AMD Threadripper or Intel Server \(Xeon\) CPUs are fine. + - We recommend AMD Threadripper as you get a larger number of cores for parallelization compared to Intel. + - Threadripper also has a cost-per-core advantage and a greater number of PCIe lanes compared to the equivalent Intel part. PoH \(Proof of History\) is based on sha256 and Threadripper also supports sha256 hardware instructions. +- SSD size and I/O style \(SATA vs NVMe/M.2\) for a validator + - Minimum example - Samsung 860 Evo 2TB + - Mid-range example - Samsung 860 Evo 4TB + - High-end example - Samsung 860 Evo 4TB +- GPUs + - While a CPU-only node may be able to keep up with the initial idling network, once transaction throughput increases, GPUs will be necessary + - What kind of GPU? + - We recommend Nvidia 2080Ti or 1080Ti series consumer GPU or Tesla series server GPUs. + - We do not currently support OpenCL and therefore do not support AMD GPUs. We have a bounty out for someone to port us to OpenCL. Interested? [Check out our GitHub.](https://github.com/solana-labs/solana) +- Power Consumption + - Approximate power consumption for a validator node running an AMD Threadripper 2950W and 2x 2080Ti GPUs is 800-1000W. ### Preconfigured Setups Here are our recommendations for low, medium, and high end machine specifications: -| | Low end | Medium end | High end | Notes | -| :--- | :--- | :--- | :--- | :--- | -| CPU | AMD Threadripper 1900x | AMD Threadripper 2920x | AMD Threadripper 2950x | Consider a 10Gb-capable motherboard with as many PCIe lanes and m.2 slots as possible. | -| RAM | 16GB | 32GB | 64GB | | -| OS Drive | Samsung 860 Evo 2TB | Samsung 860 Evo 4TB | Samsung 860 Evo 4TB | Or equivalent SSD | -| Accounts Drive\(s\) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | | -| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. | +| | Low end | Medium end | High end | Notes | +| :------------------ | :---------------------------------------------------- | :--------------------- | :--------------------- | :------------------------------------------------------------------------------------- | +| CPU | AMD Threadripper 1900x | AMD Threadripper 2920x | AMD Threadripper 2950x | Consider a 10Gb-capable motherboard with as many PCIe lanes and m.2 slots as possible. | +| RAM | 16GB | 32GB | 64GB | | +| OS Drive | Samsung 860 Evo 2TB | Samsung 860 Evo 4TB | Samsung 860 Evo 4TB | Or equivalent SSD | +| Accounts Drive\(s\) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | | +| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. | ## Software -* We build and run on Ubuntu 18.04. Some users have had trouble when running on Ubuntu 16.04 -* See [Installing Solana](../cli/install-solana-cli-tools.md) for the current Solana software release. +- We build and run on Ubuntu 18.04. 
Some users have had trouble when running on Ubuntu 16.04 +- See [Installing Solana](../cli/install-solana-cli-tools.md) for the current Solana software release. Be sure to ensure that the machine used is not behind a residential NAT to avoid NAT traversal issues. A cloud-hosted machine works best. **Ensure that IP ports 8000 through 10000 are not blocked for Internet inbound and outbound traffic.** For more information on port forwarding with regards to residential networks, see [this document](http://www.mcs.sdsmt.edu/lpyeatt/courses/314/PortForwardingSetup.pdf). -Prebuilt binaries are available for Linux x86\_64 \(Ubuntu 18.04 recommended\). +Prebuilt binaries are available for Linux x86_64 \(Ubuntu 18.04 recommended\). MacOS or WSL users may build from source. ## GPU Requirements diff --git a/docs/src/running-validator/validator-stake.md b/docs/src/running-validator/validator-stake.md index f8485a0a61..5f27c9dde9 100644 --- a/docs/src/running-validator/validator-stake.md +++ b/docs/src/running-validator/validator-stake.md @@ -1,4 +1,6 @@ -# Staking +--- +title: Staking +--- **By default your validator will have no stake.** This means it will be ineligible to become leader. @@ -81,19 +83,19 @@ so it can take an hour or more for stake to come fully online. To monitor your validator during its warmup period: -* View your vote account:`solana vote-account ~/vote-account-keypair.json` This displays the current state of all the votes the validator has submitted to the network. -* View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json` -* `solana validators` displays the current active stake of all validators, including yours -* `solana stake-history ` shows the history of stake warming up and cooling down over recent epochs -* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] voted and reset PoH at tick height ####. My next leader slot is ####` -* Once your stake is warmed up, you will see a stake balance listed for your validator by running `solana validators` +- View your vote account:`solana vote-account ~/vote-account-keypair.json` This displays the current state of all the votes the validator has submitted to the network. +- View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json` +- `solana validators` displays the current active stake of all validators, including yours +- `solana stake-history` shows the history of stake warming up and cooling down over recent epochs +- Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] voted and reset PoH at tick height ####. My next leader slot is ####` +- Once your stake is warmed up, you will see a stake balance listed for your validator by running `solana validators` ## Monitor Your Staked Validator Confirm your validator becomes a [leader](../terminology.md#leader) -* After your validator is caught up, use the `solana balance` command to monitor the earnings as your validator is selected as leader and collects transaction fees -* Solana nodes offer a number of useful JSON-RPC methods to return information about the network and your validator's participation. Make a request by using curl \(or another http client of your choosing\), specifying the desired method in JSON-RPC-formatted data. 
For example: +- After your validator is caught up, use the `solana balance` command to monitor the earnings as your validator is selected as leader and collects transaction fees +- Solana nodes offer a number of useful JSON-RPC methods to return information about the network and your validator's participation. Make a request by using curl \(or another http client of your choosing\), specifying the desired method in JSON-RPC-formatted data. For example: ```bash // Request @@ -105,9 +107,9 @@ Confirm your validator becomes a [leader](../terminology.md#leader) Helpful JSON-RPC methods: -* `getEpochInfo`[An epoch](../terminology.md#epoch) is the time, i.e. number of [slots](../terminology.md#slot), for which a [leader schedule](../terminology.md#leader-schedule) is valid. This will tell you what the current epoch is and how far into it the cluster is. -* `getVoteAccounts` This will tell you how much active stake your validator currently has. A % of the validator's stake is activated on an epoch boundary. You can learn more about staking on Solana [here](../cluster/stake-delegation-and-rewards.md). -* `getLeaderSchedule` At any given moment, the network expects only one validator to produce ledger entries. The [validator currently selected to produce ledger entries](../cluster/leader-rotation.md#leader-rotation) is called the “leader”. This will return the complete leader schedule \(on a slot-by-slot basis\) for currently activated stake, the identity pubkey will show up 1 or more times here. +- `getEpochInfo`[An epoch](../terminology.md#epoch) is the time, i.e. number of [slots](../terminology.md#slot), for which a [leader schedule](../terminology.md#leader-schedule) is valid. This will tell you what the current epoch is and how far into it the cluster is. +- `getVoteAccounts` This will tell you how much active stake your validator currently has. A % of the validator's stake is activated on an epoch boundary. You can learn more about staking on Solana [here](../cluster/stake-delegation-and-rewards.md). +- `getLeaderSchedule` At any given moment, the network expects only one validator to produce ledger entries. The [validator currently selected to produce ledger entries](../cluster/leader-rotation.md#leader-rotation) is called the “leader”. This will return the complete leader schedule \(on a slot-by-slot basis\) for currently activated stake, the identity pubkey will show up 1 or more times here. ## Deactivating Stake @@ -119,7 +121,7 @@ solana deactivate-stake ~/validator-stake-keypair.json ``` Stake is not deactivated immediately and instead cools down in a similar fashion -as stake warm up. Your validator should remain attached to the cluster while +as stake warm up. Your validator should remain attached to the cluster while the stake is cooling down. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, diff --git a/docs/src/running-validator/validator-start.md b/docs/src/running-validator/validator-start.md index ae929f9c14..8ff63d4c48 100644 --- a/docs/src/running-validator/validator-start.md +++ b/docs/src/running-validator/validator-start.md @@ -1,4 +1,6 @@ -# Starting a Validator +--- +title: Starting a Validator +--- ## Configure Solana CLI @@ -85,11 +87,12 @@ The corresponding identity public key can now be viewed by running: ```bash solana-keygen pubkey ASK ``` + and then entering your seed phrase. 
See [Paper Wallet Usage](../paper-wallet/paper-wallet-usage.md) for more info. -------- +--- ### Vanity Keypair @@ -101,7 +104,7 @@ solana-keygen grind --starts-with e1v1s Depending on the string requested, it may take days to find a match... ------- +--- Your validator identity keypair uniquely identifies your validator within the network. **It is crucial to back-up this information.** @@ -137,6 +140,7 @@ Airdrop yourself some SOL to get started: ```bash solana airdrop 10 ``` + Note that airdrops are only available on Devnet. Testnet SOL can be obtained by participating in the [Tour de SOL](../tour-de-sol/README.md) program. @@ -222,11 +226,11 @@ To force validator logging to the console add a `--log -` argument, otherwise the validator will automatically log to a file. > Note: You can use a -[paper wallet seed phrase](../paper-wallet/paper-wallet-usage.md) -for your `--identity` and/or -`--vote-account` keypairs. To use these, pass the respective argument as -`solana-validator --identity ASK ... --vote-account ASK ...` and you will be -prompted to enter your seed phrases and optional passphrase. +> [paper wallet seed phrase](../paper-wallet/paper-wallet-usage.md) +> for your `--identity` and/or +> `--vote-account` keypairs. To use these, pass the respective argument as +> `solana-validator --identity ASK ... --vote-account ASK ...` and you will be +> prompted to enter your seed phrases and optional passphrase. Confirm your validator connected to the network by opening a new terminal and running: diff --git a/docs/src/running-validator/validator-troubleshoot.md b/docs/src/running-validator/validator-troubleshoot.md index d2d90cc5ad..3afb484098 100644 --- a/docs/src/running-validator/validator-troubleshoot.md +++ b/docs/src/running-validator/validator-troubleshoot.md @@ -1,19 +1,22 @@ -# Troubleshooting +--- +title: Troubleshooting +--- There is a **\#validator-support** Discord channel available to reach other testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq). ## Useful Links & Discussion -* [Network Explorer](http://explorer.solana.com/) -* [Testnet Metrics Dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=60s&orgId=2) -* Validator chat channels - * [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries. - * [\#tourdesol](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants ([What is Tour de SOL?](https://solana.com/tds/)). - * [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL - * [\#tourdesol-stage0](https://discord.gg/Xf8tES) Discussion for events within Tour de SOL Stage 0. Stage 0 includes all the dry-run -* [Core software repo](https://github.com/solana-labs/solana) -* [Tour de SOL Docs](https://docs.solana.com/tour-de-sol) -* [TdS repo](https://github.com/solana-labs/tour-de-sol) -* [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds) + +- [Network Explorer](http://explorer.solana.com/) +- [Testnet Metrics Dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=60s&orgId=2) +- Validator chat channels + - [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries. 
+ - [\#tourdesol](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants ([What is Tour de SOL?](https://solana.com/tds/)). + - [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL + - [\#tourdesol-stage0](https://discord.gg/Xf8tES) Discussion for events within Tour de SOL Stage 0. Stage 0 includes all the dry-run +- [Core software repo](https://github.com/solana-labs/solana) +- [Tour de SOL Docs](https://docs.solana.com/tour-de-sol) +- [TdS repo](https://github.com/solana-labs/tour-de-sol) +- [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds) Can't find what you're looking for? Send an email to ryan@solana.com or reach out to @rshea\#2622 on Discord. diff --git a/docs/src/terminology.md b/docs/src/terminology.md index e695c87556..eed6105722 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -1,4 +1,6 @@ -# Terminology +--- +title: Terminology +--- The following terms are used throughout the documentation. @@ -78,9 +80,9 @@ An entry on the [ledger](terminology.md#ledger) either a [tick](terminology.md#t A preimage resistant [hash](terminology.md#hash) over the final contents of an entry, which acts as the [entry's](terminology.md#entry) globally unique identifier. The hash serves as evidence of: - * The entry being generated after a duration of time - * The specified [transactions](terminology.md#transaction) are those included in the entry - * The entry's position with respect to other entries in [ledger](terminology.md#ledger) +- The entry being generated after a duration of time +- The specified [transactions](terminology.md#transaction) are those included in the entry +- The entry's position with respect to other entries in [ledger](terminology.md#ledger) See [Proof of History](terminology.md#proof-of-history). diff --git a/docs/src/theme/Footer/index.js b/docs/src/theme/Footer/index.js new file mode 100644 index 0000000000..73a4a8ae8d --- /dev/null +++ b/docs/src/theme/Footer/index.js @@ -0,0 +1,126 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import React from "react"; +import clsx from "clsx"; + +import Link from "@docusaurus/Link"; +import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; +import useBaseUrl from "@docusaurus/useBaseUrl"; +import styles from "./styles.module.css"; + +function FooterLink({ to, href, label, prependBaseUrlToHref, ...props }) { + const toUrl = useBaseUrl(to); + const normalizedHref = useBaseUrl(href, { forcePrependBaseUrl: true }); + + return ( + + {label} + + ); +} + +const FooterLogo = ({ url, alt }) => ( + {alt} +); + +function Footer() { + const context = useDocusaurusContext(); + const { siteConfig = {} } = context; + const { themeConfig = {} } = siteConfig; + const { footer } = themeConfig; + + const { copyright, links = [], logo = {} } = footer || {}; + const logoUrl = useBaseUrl(logo.src); + + if (!footer) { + return null; + } + + return ( +
+    <footer
+      className={clsx("footer", {
+        "footer--dark": footer.style === "dark",
+      })}
+    >
+      <div className="container">
+        {links && links.length > 0 && (
+          <div className="row footer__links">
+            {links.map((linkItem, i) => (
+              <div key={i} className="col footer__col">
+                {linkItem.title != null ? (
+                  <h4 className="footer__title">{linkItem.title}</h4>
+                ) : null}
+                {linkItem.items != null &&
+                Array.isArray(linkItem.items) &&
+                linkItem.items.length > 0 ? (
+                  <ul className="footer__items">
+                    {linkItem.items.map((item, key) =>
+                      item.html ? (
+                        <li
+                          key={key}
+                          className="footer__item"
+                          dangerouslySetInnerHTML={{ __html: item.html }}
+                        />
+                      ) : (
+                        <li key={item.href || item.to} className="footer__item">
+                          <FooterLink {...item} />
+                        </li>
+                      )
+                    )}
+                  </ul>
+                ) : null}
+              </div>
+            ))}
+          </div>
+        )}
+        {(logo || copyright) && (
+          <div className="text--center">
+            {logo && logo.src && (
+              <div className="margin-bottom--sm">
+                {logo.href ? (
+                  <a
+                    href={logo.href}
+                    target="_blank"
+                    rel="noopener noreferrer"
+                    className={styles.footerLogoLink}
+                  >
+                    <FooterLogo alt={logo.alt} url={logoUrl} />
+                  </a>
+                ) : (
+                  <FooterLogo alt={logo.alt} url={logoUrl} />
+                )}
+              </div>
+            )}
+            <div
+              dangerouslySetInnerHTML={{
+                __html: copyright,
+              }}
+            />
+          </div>
+        )}
+      </div>
+    </footer>
+ ); +} + +export default Footer; diff --git a/docs/src/theme/Footer/styles.module.css b/docs/src/theme/Footer/styles.module.css new file mode 100644 index 0000000000..58c5842c88 --- /dev/null +++ b/docs/src/theme/Footer/styles.module.css @@ -0,0 +1,15 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +.footerLogoLink { + opacity: 0.5; + transition: opacity 0.15s ease-in-out; +} + +.footerLogoLink:hover { + opacity: 1; +} diff --git a/docs/src/theme/SearchBar.js b/docs/src/theme/SearchBar.js new file mode 100644 index 0000000000..d1ad6ac915 --- /dev/null +++ b/docs/src/theme/SearchBar.js @@ -0,0 +1 @@ +export { default } from "@docusaurus/Noop"; diff --git a/docs/src/tour-de-sol/README.md b/docs/src/tour-de-sol/README.md index 4d92c5ab39..10b9523e12 100644 --- a/docs/src/tour-de-sol/README.md +++ b/docs/src/tour-de-sol/README.md @@ -1,4 +1,6 @@ -# Introduction +--- +title: Introduction +--- ## Welcome! diff --git a/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md b/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md index 5584c73ce0..ecb56d1a32 100644 --- a/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md +++ b/docs/src/tour-de-sol/participation/steps-to-create-a-validator.md @@ -1,9 +1,11 @@ -# Steps to create a validator +--- +title: Steps to create a validator +--- To create a Solana validator, follow the normal [validator workflow](../../running-validator/validator-start.md) targeting the [Testnet cluster](../../clusters.md). Note that Testnet validators are automatically staked by a process that runs -every Epoch. If your validator is running correctly then in a couple of days it +every Epoch. If your validator is running correctly then in a couple of days it will be staked (and automatically destaked if offline for a prolonged period of time). diff --git a/docs/src/tour-de-sol/participation/validator-public-key-registration.md b/docs/src/tour-de-sol/participation/validator-public-key-registration.md index a852498d2d..1826aa6fa2 100644 --- a/docs/src/tour-de-sol/participation/validator-public-key-registration.md +++ b/docs/src/tour-de-sol/participation/validator-public-key-registration.md @@ -1,6 +1,8 @@ -# Create a validator public key +--- +title: Create a validator public key +--- -In order to participate you need to first register. See [Registration info](../registration/how-to-register.md). +In order to participate you need to first register. See [Registration info](../registration/how-to-register.md). In order to obtain your allotment of SOL you need to publish your validator's identity public key under your keybase.io account. @@ -19,9 +21,7 @@ validator's identity public key under your keybase.io account. solana-keygen pubkey ~/validator-keypair.json ``` -{% hint style="info" %} -Note: The "validator-keypair.json” file is also your \(ed25519\) private key. -{% endhint %} +> Note: The "validator-keypair.json” file is also your \(ed25519\) private key. Your validator identity keypair uniquely identifies your validator within the network. 
**It is crucial to back-up this information.** diff --git a/docs/src/tour-de-sol/participation/validator-technical-requirements.md b/docs/src/tour-de-sol/participation/validator-technical-requirements.md index 17d0fc7143..1444713f63 100644 --- a/docs/src/tour-de-sol/participation/validator-technical-requirements.md +++ b/docs/src/tour-de-sol/participation/validator-technical-requirements.md @@ -1,4 +1,6 @@ -# Requirements to run a validator +--- +title: Requirements to run a validator +--- ## Hardware @@ -6,4 +8,4 @@ See [suggested hardware configuration here](../../running-validator/validator-re ## Software -* We build and run on Ubuntu 18.04. Some users have had trouble when running on Ubuntu 16.04 +- We build and run on Ubuntu 18.04. Some users have had trouble when running on Ubuntu 16.04 diff --git a/docs/src/tour-de-sol/registration/confidentiality.md b/docs/src/tour-de-sol/registration/confidentiality.md index d07f3d1b46..17b65a4007 100644 --- a/docs/src/tour-de-sol/registration/confidentiality.md +++ b/docs/src/tour-de-sol/registration/confidentiality.md @@ -1,4 +1,6 @@ -# Confidentiality +--- +title: Confidentiality +--- **Section 8 of the** [**TOUR DE SOL PARTICIPATION TERMS**](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) **references confidentiality.** diff --git a/docs/src/tour-de-sol/registration/how-to-register.md b/docs/src/tour-de-sol/registration/how-to-register.md index 782b32d52d..aa423f8a43 100644 --- a/docs/src/tour-de-sol/registration/how-to-register.md +++ b/docs/src/tour-de-sol/registration/how-to-register.md @@ -1,24 +1,30 @@ -# How To Register +--- +title: How To Register +--- #### 1) Registration Form + [Submit the registration form here](https://forms.gle/gQYLozj5u7yKU3HG6) #### 2) KYC/AML (via Coinlist) + [Register for KYC/AML + Participation Agreement here](https://tsm.coinlist.co/solana-staking) -*If you’ve completed KYC/AML previously for either SLP or TdS with the same +_If you’ve completed KYC/AML previously for either SLP or TdS with the same entity/individual then you will not need to go through this again. -We do not accept U.S. entities or individuals.* +We do not accept U.S. entities or individuals._ #### 3) Join Our Discord + **Required** for all Tour de SOL validators, as this is our primary communication channel: https://discord.gg/N3mqAfa ### Next Steps - - Check out our documentation to start getting familiar with how to -[Run a Validator](../../running-validator/README.md) - - After you've finished the registration and KYC, you will receive an email -with instructions to finish your on-boarding process. +- Check out our documentation to start getting familiar with how to + [Run a Validator](../../running-validator/README.md) - - See you on Discord! +- After you've finished the registration and KYC, you will receive an email + with instructions to finish your on-boarding process. + +- See you on Discord! 
diff --git a/docs/src/tour-de-sol/registration/rewards.md b/docs/src/tour-de-sol/registration/rewards.md index 74eb0b3a12..d2319ffabe 100644 --- a/docs/src/tour-de-sol/registration/rewards.md +++ b/docs/src/tour-de-sol/registration/rewards.md @@ -1,4 +1,6 @@ -# Rewards +--- +title: Rewards +--- ## Reward Calculation diff --git a/docs/src/tour-de-sol/registration/terms-of-participation.md b/docs/src/tour-de-sol/registration/terms-of-participation.md index 71cb885d01..e04053a752 100644 --- a/docs/src/tour-de-sol/registration/terms-of-participation.md +++ b/docs/src/tour-de-sol/registration/terms-of-participation.md @@ -1,6 +1,5 @@ -# Terms of Participation +--- +title: Terms of Participation +--- Please see the official [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) for complete details. -Download below: - -{% file src="../../.gitbook/assets/solana-tour-de-sol-participation-terms-20190723.pdf" caption="Tour de SOL Participation Terms" %} diff --git a/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md b/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md index 1e14b48339..2924fac2eb 100644 --- a/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md +++ b/docs/src/tour-de-sol/registration/validator-registration-and-rewards-faq.md @@ -1,4 +1,6 @@ -# Registration FAQ +--- +title: Registration FAQ +--- The [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) should be considered the authoritative resource for any participation questions. @@ -18,7 +20,7 @@ We’ve partnered with Coinlist to manage the Tour de Sol KYC/AML process. You c ## What are my responsibilities as a Tour de Sol participant? -Please see section “2c Tour de SOL Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for details. +Please see section “2c Tour de SOL Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for details. ### How is the “50% of the active Tour event time” responsibility calculated? @@ -40,7 +42,7 @@ Please also see Section “4 Prohibited Conduct” of the [TOUR DE SOL PARTICIPA #### As referenced in section “4 Prohibited Conduct”, what would be an example of providing Tour Services from a jurisdiction other than the jurisdiction of my residence? Does this mean my server has to reside in the jurisdiction of my residence? -No. Servers can be in other jurisdictions that differ from a participant’s residency. By signing the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) the participant has represented that they are delivering their services from the US if they reside there or from outside the US if they are not residing within the US. +No. Servers can be in other jurisdictions that differ from a participant’s residency. By signing the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) the participant has represented that they are delivering their services from the US if they reside there or from outside the US if they are not residing within the US. ## How are rewards calculated? 
diff --git a/docs/src/tour-de-sol/submitting-bugs.md b/docs/src/tour-de-sol/submitting-bugs.md index 0770c470c9..f8450d6865 100644 --- a/docs/src/tour-de-sol/submitting-bugs.md +++ b/docs/src/tour-de-sol/submitting-bugs.md @@ -1,4 +1,6 @@ -# Submitting Bugs +--- +title: Submitting Bugs +--- Please submit all bugs and feedback as [issues in this Github repo](https://github.com/solana-labs/solana/issues). diff --git a/docs/src/tour-de-sol/useful-links.md b/docs/src/tour-de-sol/useful-links.md index 3c3cd2824e..66f443e5e8 100644 --- a/docs/src/tour-de-sol/useful-links.md +++ b/docs/src/tour-de-sol/useful-links.md @@ -1,17 +1,16 @@ --- +title: Useful Links & Discussion description: Where to go after you've read this guide --- -# Useful Links & Discussion - -* [Network Explorer](http://explorer.solana.com/) -* [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds) -* Validator chat channels - * [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries that don’t fall under Tour de SOL. - * [\#tourdesol-validators](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants. - * [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL -* [Core software repo](https://github.com/solana-labs/solana) -* [Submit bugs and feedback in this repo](https://github.com/solana-labs/solana/issues) +- [Network Explorer](http://explorer.solana.com/) +- [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds) +- Validator chat channels + - [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries that don’t fall under Tour de SOL. + - [\#tourdesol-validators](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants. + - [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL +- [Core software repo](https://github.com/solana-labs/solana) +- [Submit bugs and feedback in this repo](https://github.com/solana-labs/solana/issues) {% hint style="info" %} Can't find what you're looking for? Send an email to ryan@solana.com or reach out to @rshea\#2622 on Discord. diff --git a/docs/src/transaction.md b/docs/src/transaction.md index ca14f21b10..84b358ef3b 100644 --- a/docs/src/transaction.md +++ b/docs/src/transaction.md @@ -1,11 +1,13 @@ -# Anatomy of a Transaction +--- +title: Anatomy of a Transaction +--- This section documents the binary format of a transaction. ## Transaction Format A transaction contains a [compact-array](#compact-array-format) of signatures, -followed by a [message](#message-format). Each item in the signatures array is +followed by a [message](#message-format). Each item in the signatures array is a [digital signature](#signature-format) of the given message. The Solana runtime verifies that the number of signatures matches the number in the first 8 bits of the [message header](#message-header-format). It also verifies that @@ -16,7 +18,6 @@ the same index in the message's account addresses array. Each digital signature is in the ed25519 binary format and consumes 64 bytes. 
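To make the wire layout described above concrete, here is a minimal Rust sketch (illustrative only, not the solana-sdk implementation; the function names are made up) that splits a serialized transaction into its signature compact-array and the trailing message bytes. It assumes the compact-u16 length encoding described later in this section.

```rust
/// Decode a compact-u16 length prefix (1-3 bytes, little-endian 7-bit groups),
/// returning the decoded value and the number of bytes consumed.
fn decode_compact_u16(bytes: &[u8]) -> Option<(u16, usize)> {
    let mut value: u32 = 0;
    for (i, &byte) in bytes.iter().take(3).enumerate() {
        value |= u32::from(byte & 0x7f) << (7 * i);
        if byte & 0x80 == 0 {
            return Some((value as u16, i + 1));
        }
    }
    None // malformed or truncated length prefix
}

/// Each item in the signatures array is an ed25519 signature of 64 bytes.
const SIGNATURE_BYTES: usize = 64;

/// Split a serialized transaction into its signatures and the message bytes
/// that every one of those signatures signs.
fn split_transaction(tx: &[u8]) -> Option<(Vec<&[u8]>, &[u8])> {
    let (num_signatures, prefix_len) = decode_compact_u16(tx)?;
    let sig_end = prefix_len + num_signatures as usize * SIGNATURE_BYTES;
    if tx.len() < sig_end {
        return None; // truncated input
    }
    let signatures: Vec<&[u8]> = tx[prefix_len..sig_end].chunks(SIGNATURE_BYTES).collect();
    // Everything after the signature array is the message. The runtime
    // verifies each signature against these bytes using the account address
    // at the same index in the message's account addresses array.
    Some((signatures, &tx[sig_end..]))
}
```

As noted above, the runtime additionally checks that the number of signatures matches the count in the first 8 bits of the message header; that check is omitted from this sketch.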
- ## Message Format A message contains a [header](#message-header-format), followed by a @@ -28,7 +29,7 @@ recent [blockhash](#blockhash-format), followed by a compact-array of The message header contains three unsigned 8-bit values. The first value is the number of required signatures in the containing transaction. The second value -is the number of those corresponding account addresses that are read-only. The +is the number of those corresponding account addresses that are read-only. The third value in the message header is the number of read-only account addresses not requiring signatures. @@ -40,24 +41,21 @@ accounts following. The addresses that do not require signatures follow the addresses that do, again with read-write accounts first and read-only accounts following. - ### Blockhash Format A blockhash contains a 32-byte SHA-256 hash. It is used to indicate when a client last observed the ledger. Validators will reject transactions when the blockhash is too old. - ## Instruction Format An instruction contains a program ID index, followed by a compact-array of account address indexes, followed by a compact-array of opaque 8-bit data. The program ID index is used to identify an on-chain program that can interpret the -opaque data. The program ID index is an unsigned 8-bit index to an account +opaque data. The program ID index is an unsigned 8-bit index to an account address in the message's array of account addresses. The account address indexes are each an unsigned 8-bit index into that same array. - ## Compact-Array Format A compact-array is serialized as the array length, followed by each array item. @@ -66,7 +64,7 @@ The array length is a special multi-byte encoding called compact-u16. ### Compact-u16 Format A compact-u16 is a multi-byte encoding of 16 bits. The first byte contains the -lower 7 bits of the value in its lower 7 bits. If the value is above 0x7f, the +lower 7 bits of the value in its lower 7 bits. If the value is above 0x7f, the high bit is set and the next 7 bits of the value are placed into the lower 7 bits of a second byte. If the value is above 0x3fff, the high bit is set and the remaining 2 bits of the value are placed into the lower 2 bits of a third diff --git a/docs/src/validator/README.md b/docs/src/validator/README.md index c99047bad8..849e8a57a9 100644 --- a/docs/src/validator/README.md +++ b/docs/src/validator/README.md @@ -1,6 +1,8 @@ -# Anatomy of a Validator +--- +title: Anatomy of a Validator +--- -![Validator block diagrams](../.gitbook/assets/validator.svg) +![Validator block diagrams](/img/validator.svg) ## Pipelining diff --git a/docs/src/validator/blockstore.md b/docs/src/validator/blockstore.md index d199340f34..60383704e8 100644 --- a/docs/src/validator/blockstore.md +++ b/docs/src/validator/blockstore.md @@ -1,4 +1,6 @@ -# Blockstore +--- +title: Blockstore +--- After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. @@ -40,21 +42,23 @@ Repair requests for recent shreds are served out of RAM or recent files and out 1. 
Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\). 2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing: - * `slot_index` - The index of this slot - * `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\) - * `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\). - * `received` - The highest received shred index for the slot - * `next_slots` - A list of future slots this slot could chain to. Used when rebuilding + + - `slot_index` - The index of this slot + - `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\) + - `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `n` \(i.e. the highest consecutive shred index\). + - `received` - The highest received shred index for the slot + - `next_slots` - A list of future slots this slot could chain to. Used when rebuilding the ledger to find possible fork points. - * `last_index` - The index of the shred that is flagged as the last shred for this slot. This flag on a shred will be set by the leader for a slot when they are transmitting the last shred for a slot. - * `is_rooted` - True iff every block from 0...slot forms a full sequence without any holes. We can derive is\_rooted for each slot with the following rules. Let slot\(n\) be the slot with index `n`, and slot\(n\).is\_full\(\) is true if the slot with index `n` has all the ticks expected for that slot. Let is\_rooted\(n\) be the statement that "the slot\(n\).is\_rooted is true". Then: + - `last_index` - The index of the shred that is flagged as the last shred for this slot. This flag on a shred will be set by the leader for a slot when they are transmitting the last shred for a slot. + - `is_rooted` - True iff every block from 0...slot forms a full sequence without any holes. We can derive is_rooted for each slot with the following rules. Let slot\(n\) be the slot with index `n`, and slot\(n\).is_full\(\) is true if the slot with index `n` has all the ticks expected for that slot. Let is_rooted\(n\) be the statement that "the slot\(n\).is_rooted is true". Then: + + is_rooted\(0\); is_rooted\(n+1\) iff \(is_rooted\(n\) and slot\(n\).is_full\(\)\) - is\_rooted\(0\) is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\) 3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`. 4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details. 5. Update notifications - The Blockstore notifies listeners when slot\(n\).is_rooted is flipped from false to true for any `n`.
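As an illustration of the `is_rooted` recurrence above, the following Rust sketch (a pared-down stand-in, not the actual Blockstore code; `SlotMeta` here carries only an `is_full` flag) derives the rooted flags for a contiguous range of slots starting at slot 0. Nothing in it is specific to the real storage layout; it only restates the recurrence from the list above.

```rust
/// Pared-down stand-in for the per-slot metadata described above; only the
/// "has all the ticks expected for this slot" flag is kept.
struct SlotMeta {
    is_full: bool,
}

/// Apply the recurrence: is_rooted(0) holds, and
/// is_rooted(n+1) iff is_rooted(n) and slot(n).is_full().
/// `slots[n]` is the metadata for the slot with index `n`.
fn rooted_flags(slots: &[SlotMeta]) -> Vec<bool> {
    let mut rooted = vec![false; slots.len()];
    if let Some(first) = rooted.first_mut() {
        *first = true; // base case: slot 0 is rooted by definition
    }
    for n in 0..slots.len().saturating_sub(1) {
        // A hole (a slot that never becomes full) keeps every later slot unrooted.
        rooted[n + 1] = rooted[n] && slots[n].is_full;
    }
    rooted
}
```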
## Blockstore APIs diff --git a/docs/src/validator/gossip.md b/docs/src/validator/gossip.md index a5ebc3d402..6dcdd7c278 100644 --- a/docs/src/validator/gossip.md +++ b/docs/src/validator/gossip.md @@ -1,4 +1,6 @@ -# Gossip Service +--- +title: Gossip Service +--- The Gossip Service acts as a gateway to nodes in the control plane. Validators use the service to ensure information is available to all other nodes in a cluster. The service broadcasts information using a gossip protocol. @@ -24,15 +26,17 @@ Upon receiving a push message, a node examines the message for: 1. Duplication: if the message has been seen before, the node drops the message and may respond with `PushMessagePrune` if forwarded from a low staked node 2. New data: if the message is new to the node - * Stores the new information with an updated version in its cluster info and + + - Stores the new information with an updated version in its cluster info and purges any previous older value - * Stores the message in `pushed_once` \(used for detecting duplicates, + - Stores the message in `pushed_once` \(used for detecting duplicates, purged after `PUSH_MSG_TIMEOUT * 5` ms\) - * Retransmits the messages to its own push peers + - Retransmits the messages to its own push peers + 3. Expiration: nodes drop push messages that are older than `PUSH_MSG_TIMEOUT` ### Push Peers, Prune Message @@ -59,8 +63,8 @@ An eclipse attack is an attempt to take over the set of node connections with ad This is relevant to our implementation in the following ways. -* Pull messages select a random node from the network. An eclipse attack on _pull_ would require an attacker to influence the random selection in such a way that only adversarial nodes are selected for pull. -* Push messages maintain an active set of nodes and select a random fanout for every push message. An eclipse attack on _push_ would influence the active set selection, or the random fanout selection. +- Pull messages select a random node from the network. An eclipse attack on _pull_ would require an attacker to influence the random selection in such a way that only adversarial nodes are selected for pull. +- Push messages maintain an active set of nodes and select a random fanout for every push message. An eclipse attack on _push_ would influence the active set selection, or the random fanout selection. ### Time and Stake based weights @@ -86,6 +90,5 @@ The active push protocol described here is based on [Plum Tree](https://haslab.uminho.pt/sites/default/files/jop/files/lpr07a.pdf). The main differences are: -* Push messages have a wallclock that is signed by the originator. Once the wallclock expires the message is dropped. A hop limit is difficult to implement in an adversarial setting. -* Lazy Push is not implemented because its not obvious how to prevent an adversary from forging the message fingerprint. A naive approach would allow an adversary to be prioritized for pull based on their input. - +- Push messages have a wallclock that is signed by the originator. Once the wallclock expires the message is dropped. A hop limit is difficult to implement in an adversarial setting. +- Lazy Push is not implemented because its not obvious how to prevent an adversary from forging the message fingerprint. A naive approach would allow an adversary to be prioritized for pull based on their input. 
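The push-message handling steps listed above can be summarized in a short Rust sketch. This is not the real gossip implementation: the types, the `String` origin key, the timeout constant, and the wallclock-based duplicate check are simplifications chosen only to illustrate the three cases (duplication, new data, expiration).

```rust
use std::collections::HashMap;

/// Illustrative stand-in for PUSH_MSG_TIMEOUT, not the real constant.
const PUSH_MSG_TIMEOUT_MS: u64 = 30_000;

/// Pared-down view of a value carried in a push message.
struct PushValue {
    origin: String,    // identity of the node that created the value
    wallclock_ms: u64, // originator-signed wallclock described in the text
}

enum PushOutcome {
    DroppedDuplicate,    // seen before; caller may answer with PushMessagePrune
    DroppedExpired,      // older than PUSH_MSG_TIMEOUT
    StoredAndRetransmit, // new data: store it and forward to push peers
}

#[derive(Default)]
struct GossipNode {
    /// Latest value stored per origin (the "cluster info" in the text).
    cluster_info: HashMap<String, PushValue>,
    /// Recently seen values, used for duplicate detection; the text purges
    /// these after PUSH_MSG_TIMEOUT * 5 ms.
    pushed_once: HashMap<String, u64>,
}

impl GossipNode {
    fn handle_push(&mut self, value: PushValue, now_ms: u64) -> PushOutcome {
        // 3. Expiration: drop push messages older than PUSH_MSG_TIMEOUT.
        if now_ms.saturating_sub(value.wallclock_ms) > PUSH_MSG_TIMEOUT_MS {
            return PushOutcome::DroppedExpired;
        }
        // 1. Duplication: drop anything that is not newer than what we hold.
        if let Some(existing) = self.cluster_info.get(&value.origin) {
            if existing.wallclock_ms >= value.wallclock_ms {
                return PushOutcome::DroppedDuplicate;
            }
        }
        // 2. New data: store the updated version (purging the older one),
        //    remember it for duplicate detection, and signal the caller to
        //    retransmit it to its own push peers.
        self.pushed_once.insert(value.origin.clone(), now_ms);
        self.cluster_info.insert(value.origin.clone(), value);
        PushOutcome::StoredAndRetransmit
    }
}
```

In the real service the duplicate check is keyed on the value itself rather than on the wallclock alone, and `pushed_once` is purged after `PUSH_MSG_TIMEOUT * 5` ms, as described above.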
diff --git a/docs/src/validator/runtime.md b/docs/src/validator/runtime.md index 2bc71f6ce9..f02030b13e 100644 --- a/docs/src/validator/runtime.md +++ b/docs/src/validator/runtime.md @@ -1,4 +1,6 @@ -# The Runtime +--- +title: The Runtime +--- ## The Runtime @@ -20,7 +22,7 @@ Transactions are batched and processed in a pipeline. The TPU and TVU follow a s The TVU runtime ensures that PoH verification occurs before the runtime processes any transactions. -![Runtime pipeline](../.gitbook/assets/runtime.svg) +![Runtime pipeline](/img/runtime.svg) At the _execute_ stage, the loaded accounts have no data dependencies, so all the programs can be executed in parallel. @@ -37,13 +39,13 @@ Execution of the program involves mapping the program's public key to an entrypo The interface is best described by the `Instruction::data` that the user encodes. -* `CreateAccount` - This allows the user to create an account with an allocated data array and assign it to a Program. -* `CreateAccountWithSeed` - Same as `CreateAccount`, but the new account's address is derived from - - the funding account's pubkey, - - a mnemonic string (seed), and - - the pubkey of the Program -* `Assign` - Allows the user to assign an existing account to a program. -* `Transfer` - Transfers lamports between accounts. +- `CreateAccount` - This allows the user to create an account with an allocated data array and assign it to a Program. +- `CreateAccountWithSeed` - Same as `CreateAccount`, but the new account's address is derived from + - the funding account's pubkey, + - a mnemonic string (seed), and + - the pubkey of the Program +- `Assign` - Allows the user to assign an existing account to a program. +- `Transfer` - Transfers lamports between accounts. ### Program State Security @@ -53,15 +55,15 @@ To pass messages between programs, the receiving program must accept the message ### Notes -* There is no dynamic memory allocation. Client's need to use `CreateAccount` instructions to create memory before passing it to another program. This instruction can be composed into a single transaction with the call to the program itself. -* `CreateAccount` and `Assign` guarantee that when account is assigned to the program, the Account's data is zero initialized. -* Transactions that assign an account to a program or allocate space must be signed by the Account address' private key unless the Account is being created by `CreateAccountWithSeed`, in which case there is no corresponding private key for the account's address/pubkey. -* Once assigned to program an Account cannot be reassigned. -* Runtime guarantees that a program's code is the only code that can modify Account data that the Account is assigned to. -* Runtime guarantees that the program can only spend lamports that are in accounts that are assigned to it. -* Runtime guarantees the balances belonging to accounts are balanced before and after the transaction. -* Runtime guarantees that instructions all executed successfully when a transaction is committed. +- There is no dynamic memory allocation. Client's need to use `CreateAccount` instructions to create memory before passing it to another program. This instruction can be composed into a single transaction with the call to the program itself. +- `CreateAccount` and `Assign` guarantee that when account is assigned to the program, the Account's data is zero initialized. 
+- Transactions that assign an account to a program or allocate space must be signed by the Account address' private key unless the Account is being created by `CreateAccountWithSeed`, in which case there is no corresponding private key for the account's address/pubkey. +- Once assigned to program an Account cannot be reassigned. +- Runtime guarantees that a program's code is the only code that can modify Account data that the Account is assigned to. +- Runtime guarantees that the program can only spend lamports that are in accounts that are assigned to it. +- Runtime guarantees the balances belonging to accounts are balanced before and after the transaction. +- Runtime guarantees that instructions all executed successfully when a transaction is committed. ## Future Work -* [Continuations and Signals for long running Transactions](https://github.com/solana-labs/solana/issues/1485) +- [Continuations and Signals for long running Transactions](https://github.com/solana-labs/solana/issues/1485) diff --git a/docs/src/validator/tpu.md b/docs/src/validator/tpu.md index 06bd9bd2e0..54a5ea1825 100644 --- a/docs/src/validator/tpu.md +++ b/docs/src/validator/tpu.md @@ -1,3 +1,5 @@ -# TPU +--- +title: TPU +--- -![TPU Block Diagram](../.gitbook/assets/tpu.svg) +![TPU Block Diagram](/img/tpu.svg) diff --git a/docs/src/validator/tvu.md b/docs/src/validator/tvu.md index cc8b0f62e0..71a78c4559 100644 --- a/docs/src/validator/tvu.md +++ b/docs/src/validator/tvu.md @@ -1,7 +1,9 @@ -# TVU +--- +title: TVU +--- -![TVU Block Diagram](../.gitbook/assets/tvu.svg) +![TVU Block Diagram](/img/tvu.svg) ## Retransmit Stage -![Retransmit Block Diagram](../.gitbook/assets/retransmit_stage.svg) +![Retransmit Block Diagram](/img/retransmit_stage.svg) diff --git a/docs/src/wallet-guide/README.md b/docs/src/wallet-guide/README.md index 175c0c7811..243da37eec 100644 --- a/docs/src/wallet-guide/README.md +++ b/docs/src/wallet-guide/README.md @@ -1,34 +1,38 @@ -# Solana Wallet Guide +--- +title: Solana Wallet Guide +--- + This document describes the different wallet options that are available to users of Solana who want to be able to send, receive and interact with SOL tokens on the Solana blockchain. ## What is a Wallet? + A crypto wallet is a device or application that stores a collection of keys and can be used to send, receive, -and track ownership of cryptocurrencies. Wallets can take many forms. +and track ownership of cryptocurrencies. Wallets can take many forms. A wallet might be a directory or file in your computer's file system, -a piece of paper, or a specialized device called a *hardware wallet*. +a piece of paper, or a specialized device called a _hardware wallet_. There are also various smartphone apps and computer programs that provide a user-friendly way to create and manage wallets. -A *keypair* is a securely generated *private key* and its -cryptographically-derived *public key*. A private key and its corresponding -public key are together known as a *keypair*. +A _keypair_ is a securely generated _private key_ and its +cryptographically-derived _public key_. A private key and its corresponding +public key are together known as a _keypair_. A wallet contains a collection of one or more keypairs and provides some means to interact with them. -The *public key* (commonly shortened to *pubkey*) is known as the wallet's -*receiving address* or simply its *address*. The wallet address **may be shared -and displayed freely**. 
When another party is going to send some amount of +The _public key_ (commonly shortened to _pubkey_) is known as the wallet's +_receiving address_ or simply its _address_. The wallet address **may be shared +and displayed freely**. When another party is going to send some amount of cryptocurrency to a wallet, they need to know the wallet's receiving address. Depending on a blockchain's implementation, the address can also be used to view certain information about a wallet, such as viewing the balance, but has no ability to change anything about the wallet or withdraw any tokens. -The *private key* is required to digitally sign any transactions to send +The _private key_ is required to digitally sign any transactions to send cryptocurrencies to another address or to make any changes to the wallet. -The private key **must never be shared**. If someone gains access to the +The private key **must never be shared**. If someone gains access to the private key to a wallet, they can withdraw all the tokens it contains. If the private key for a wallet is lost, any tokens that have been sent to that wallet's address are **permanently lost**. @@ -44,6 +48,7 @@ of security and convenience. you first will need to create a wallet.** ## Supported Wallets + Solana supports supports several types of wallets in the Solana native command-line app as well as wallets from third-parties. @@ -58,4 +63,4 @@ may be more appropriate, as new features on the Solana blockchain will always be supported on the command line first before being integrated into third-party solutions. -{% page-ref page="cli.md" %} \ No newline at end of file +{% page-ref page="cli.md" %} diff --git a/docs/src/wallet-guide/apps.md b/docs/src/wallet-guide/apps.md index bc1dda619e..b95c85e664 100644 --- a/docs/src/wallet-guide/apps.md +++ b/docs/src/wallet-guide/apps.md @@ -1,4 +1,7 @@ -# App Wallets +--- +title: App Wallets +--- + Solana supports multiple third-party apps which should provide a familiar experience for most people who are new or experienced with using crypto wallets. @@ -13,8 +16,8 @@ The app is free and getting your wallet set up only takes a few minutes. ### Trust Wallet Security Tokens held in Trust Wallet are only as secure as the device on which the app is -installed. Anyone who is able to unlock your phone or tablet may be able to -use the Trust Wallet app and transfer your tokens. To improve security, +installed. Anyone who is able to unlock your phone or tablet may be able to +use the Trust Wallet app and transfer your tokens. To improve security, you can add a passcode to the Trust Wallet application. To add a Trust Wallet passcode, open the app and go to Settings -> Security -> Passcode. @@ -26,15 +29,17 @@ your Trust Wallet keys on a different device. From there, they could sign transactions from that device rather than on your own phone or tablet. The seed phrase is displayed when a new wallet is created and it can also be viewed at any later time in the app by following these steps: - - Go to Setting -> Wallets - - Under the Options menu for a particular wallet tap "Show Recovery Phrase" -{% page-ref page="trust-wallet.md" %} +- Go to Setting -> Wallets +- Under the Options menu for a particular wallet tap "Show Recovery Phrase" + +[Trust Wallet](trust-wallet.md) ## Ledger Live with Ledger Nano S + [Ledger Live](https://www.ledger.com/ledger-live) is available as free desktop -software and as a free app for iOS and Android. 
It is used to manage apps and -crypto accounts on a Ledger *hardware wallet*, which must be purchased +software and as a free app for iOS and Android. It is used to manage apps and +crypto accounts on a Ledger _hardware wallet_, which must be purchased separately and connected to the device running Ledger Live. [Ledger Nano S](https://shop.ledger.com/products/ledger-nano-s) is a @@ -45,4 +50,4 @@ keep track of the hardware device. Solana does not support the Ledger Nano **X** at this time. -{% page-ref page="ledger-live.md" %} \ No newline at end of file +[Ledger Live](ledger-live.md) diff --git a/docs/src/wallet-guide/cli.md b/docs/src/wallet-guide/cli.md index 34e860c0d1..b284794d91 100644 --- a/docs/src/wallet-guide/cli.md +++ b/docs/src/wallet-guide/cli.md @@ -1,4 +1,6 @@ -# Command Line Wallets +--- +title: Command Line Wallets +--- Solana supports several different types of wallets that can be used to interface directly with the Solana command-line tools. @@ -11,7 +13,7 @@ To use a Command Line Wallet, you must first [install the Solana CLI tools](../c ## File System Wallet -A *file system wallet*, aka an FS wallet, is a directory in your computer's +A _file system wallet_, aka an FS wallet, is a directory in your computer's file system. Each file in the directory holds a keypair. ### File System Wallet Security @@ -28,11 +30,11 @@ keypairs are stored on your computer as files, a skilled hacker with physical access to your computer may be able to access it. Using an encrypted hard drive, such as FileVault on MacOS, minimizes that risk. -{% page-ref page="../file-system-wallet/README.md" %} +[File System Wallet](../file-system-wallet/README.md) ## Paper Wallet -A *paper wallet* is a collection of *seed phrases* written on paper. A seed +A _paper wallet_ is a collection of _seed phrases_ written on paper. A seed phrase is some number of words (typically 12 or 24) that can be used to regenerate a keypair on demand. @@ -47,7 +49,7 @@ wallets are used in conjunction with Paper wallets and custody services are an excellent way to secure a large number of tokens for a long period of time. -{% page-ref page="../paper-wallet/README.md" %} +[Paper Wallets](../paper-wallet/README.md) ## Hardware Wallet @@ -62,4 +64,4 @@ security and convenience for cryptocurrencies. It effectively automates the process of offline signing while retaining nearly all the convenience of a file system wallet. -{% page-ref page="../hardware-wallet/README.md" %} +[Hardware Wallets](../hardware-wallet/README.md) diff --git a/docs/src/wallet-guide/ledger-live.md b/docs/src/wallet-guide/ledger-live.md index d25c67adf6..c8279436c3 100644 --- a/docs/src/wallet-guide/ledger-live.md +++ b/docs/src/wallet-guide/ledger-live.md @@ -1,4 +1,7 @@ -# Ledger Live and Ledger Nano S +--- +title: Ledger Live and Ledger Nano S +--- + This document describes how to set up a [Ledger Nano S hardware wallet](https://shop.ledger.com/products/ledger-nano-s) with the [Ledger Live](https://www.ledger.com/ledger-live) software. @@ -13,56 +16,61 @@ Users may [use a Ledger Nano S with the Solana command line tools](../hardware-wallets/ledger.md).
## Set up a Ledger Nano S - - Order a [Nano S from Ledger](https://shop.ledger.com/products/ledger-nano-s) - - Follow the instructions for device setup included in the package, - or [Ledger's Start page](https://www.ledger.com/start/) + +- Order a [Nano S from Ledger](https://shop.ledger.com/products/ledger-nano-s) +- Follow the instructions for device setup included in the package, + or [Ledger's Start page](https://www.ledger.com/start/) - [Install the latest device firmware](https://support.ledgerwallet.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware) ## Install Ledger Live - - Install [Ledger Live desktop software](https://www.ledger.com/ledger-live/), - or - - Install the [Ledger Live app for iOS](https://apps.apple.com/app/id1361671700) - or [Ledger Live for Android](https://play.google.com/store/apps/details?id=com.ledger.live). - - Requires iOS 9.1 or later. Compatible with iPhone, iPad, and iPod touch. - - Requires Android 7.0 or later. - - Connect your Nano S to your device and follow the instructions + +- Install [Ledger Live desktop software](https://www.ledger.com/ledger-live/), + or +- Install the [Ledger Live app for iOS](https://apps.apple.com/app/id1361671700) + or [Ledger Live for Android](https://play.google.com/store/apps/details?id=com.ledger.live). + - Requires iOS 9.1 or later. Compatible with iPhone, iPad, and iPod touch. + - Requires Android 7.0 or later. +- Connect your Nano S to your device and follow the instructions ## Install the Solana App on your Nano S - - Open Ledger Live - - Currently Ledger Live needs to be in "Developer Mode" - (Settings > Experimental Features > Developer Mode) to see our app. - ![Enabling Developer Mode](../.gitbook/assets/ledger-live-enable-developer-mode.png) +- Open Ledger Live +- Currently Ledger Live needs to be in "Developer Mode" + (Settings > Experimental Features > Developer Mode) to see our app. - - Go to Manager in the app and find "Solana" in the App Catalog and - click Install - - Make sure your device is plugged in via USB and is unlocked with its PIN - - You may be prompted on the Nano S to confirm the install of Solana App - - "Solana" should now show as "Installed" in the Ledger Live Manager +![Enabling Developer Mode](/img/ledger-live-enable-developer-mode.png) - ![Installed Solana App in Manager](../.gitbook/assets/ledger-live-latest-version-installed.png) +- Go to Manager in the app and find "Solana" in the App Catalog and + click Install + - Make sure your device is plugged in via USB and is unlocked with its PIN +- You may be prompted on the Nano S to confirm the install of Solana App +- "Solana" should now show as "Installed" in the Ledger Live Manager + +![Installed Solana App in Manager](/img/ledger-live-latest-version-installed.png) ## Upgrade to the latest version of the Solana App + To make sure you have the latest functionality, if you are using an older version of the Solana App, please upgrade to version v0.2.2 by following these steps. 
- - Connect your Nano S to your computer an unlock it by entering your PIN on the - device - - Open Ledger Live and click on "Manager" in the left pane - - On your Nano S, click both buttons when prompted to "Allow Manager" - - Click the "Update All" button to update the Solana app to the latest version - (v.0.2.2) +- Connect your Nano S to your computer and unlock it by entering your PIN on the + device +- Open Ledger Live and click on "Manager" in the left pane +- On your Nano S, click both buttons when prompted to "Allow Manager" +- Click the "Update All" button to update the Solana app to the latest version + (v0.2.2) - ![Upgrade All button in Manager](../.gitbook/assets/ledger-live-update-available-v0.2.2.png) +![Upgrade All button in Manager](/img/ledger-live-update-available-v0.2.2.png) - - Once the upgrade is finished, confirm v0.2.2 is installed under "Apps Installed" +- Once the upgrade is finished, confirm v0.2.2 is installed under "Apps Installed" - ![Upgrade complete](../.gitbook/assets/ledger-live-latest-version-installed.png) +![Upgrade complete](/img/ledger-live-latest-version-installed.png) ## Interact with Solana network + - To interact with your Ledger wallet on our live network, please see our -instructions on how to -[use a Ledger Nano S with the Solana command line tools](../hardware-wallets/ledger.md). + instructions on how to + [use a Ledger Nano S with the Solana command line tools](../hardware-wallets/ledger.md). ## Support diff --git a/docs/src/wallet-guide/support.md b/docs/src/wallet-guide/support.md index 3801095308..2a2b4f350f 100644 --- a/docs/src/wallet-guide/support.md +++ b/docs/src/wallet-guide/support.md @@ -1,7 +1,10 @@ -# Support / Troubleshooting +--- +title: Support / Troubleshooting +--- + If you have questions or are having trouble setting up or using your wallet of choice, please make sure you've read through all the relevant pages in our -[Wallet Guide](README.md). The Solana team is working hard to support new +[Wallet Guide](README.md). The Solana team is working hard to support new features on popular wallets, and we do our best to keep our documents up to date with the latest available features. diff --git a/docs/src/wallet-guide/trust-wallet.md b/docs/src/wallet-guide/trust-wallet.md index fd3455de3e..91a3e723e7 100644 --- a/docs/src/wallet-guide/trust-wallet.md +++ b/docs/src/wallet-guide/trust-wallet.md @@ -1,4 +1,7 @@ -# Trust Wallet +--- +title: Trust Wallet +--- + Trust Wallet is an app for your smartphone or tablet and is the fastest and simplest way for most users to get started with a Solana wallet to send and receive tokens. @@ -7,101 +10,109 @@ receive tokens.
#### iOS - - Open the App Store - - Download “Trust: Crypto & Bitcoin Wallet” from Six Days LLC - - Requires iOS 13.0 or higher - - Open Trust Wallet and follow the app prompts to get started +- Open the App Store +- Download “Trust: Crypto & Bitcoin Wallet” from Six Days LLC + - Requires iOS 13.0 or higher +- Open Trust Wallet and follow the app prompts to get started #### Android - - Open the Play Store - - Download “Trust Crypto Wallet” from Six Days LLC - - Requires Android 6.0 or higher - - Open Trust Wallet and follow the app prompts to get started +- Open the Play Store +- Download “Trust Crypto Wallet” from Six Days LLC + - Requires Android 6.0 or higher +- Open Trust Wallet and follow the app prompts to get started ## Add Solana (SOL) tokens to your wallet - - From the main page, go to the “Tokens” tab at the top of the screen - - Tap the “+” icon at the top right corner - - Search for “Solana” in the search page, and when the “Solana SOL” token is -shown, slide the slider to enable this token. - - You can now tap the Solana icon to access your Solana wallet. + +- From the main page, go to the “Tokens” tab at the top of the screen +- Tap the “+” icon at the top right corner +- Search for “Solana” in the search page, and when the “Solana SOL” token is + shown, slide the slider to enable this token. +- You can now tap the Solana icon to access your Solana wallet. [Trust Wallet Official Docs: How to Add or Remove a Coin](https://community.trustwallet.com/t/how-to-add-or-remove-a-coin/896) ## Receiving SOL tokens - - To receive SOL tokens that you’ve purchased or earned, you need to send your -Receive Address to whoever is sending you tokens. - - Tap “Receive” to view a QR code and your text address, which is a long string - of letters and numbers. - - Tap “Copy” or “Share” to send the address. - - Be very careful when you copy and paste your receive address anywhere that -you do not miss any characters at the beginning or end of the string. - - If you send an incorrect Receive address to someone and they send tokens -to that address, **those tokens will be lost forever**. + +- To receive SOL tokens that you’ve purchased or earned, you need to send your + Receive Address to whoever is sending you tokens. +- Tap “Receive” to view a QR code and your text address, which is a long string + of letters and numbers. +- Tap “Copy” or “Share” to send the address. +- Be very careful when you copy and paste your receive address anywhere that + you do not miss any characters at the beginning or end of the string. +- If you send an incorrect Receive address to someone and they send tokens + to that address, **those tokens will be lost forever**. [Trust Wallet Official Docs: How to Find my Receiving Address](https://community.trustwallet.com/t/how-to-find-my-receiving-address/2006) ## Sending SOL tokens + You can send SOL from your Trust Wallet to any other valid address on the Solana -network. Once you know the other party's receiving address, +network. Once you know the other party's receiving address, go to the main page of the wallet from which you want to send some SOL tokens: - - Tap the "Solana" icon. - - Tap "Send" - - Under "Recipient Address": - - If you already have the receiving address you are going to send to, - tap "Paste" - - If you are transferring to another Trust Wallet user, you can use the app's - QR code reader by tapping the square icon to the right of the "Paste" button. 
- - Under "Amount", enter the amount of SOL you want to send, or tap "Max" - to send **all** of the SOL in your wallet to the new address - - Tap "Next" to view a summary/confirmation page to review before submitting + +- Tap the "Solana" icon. +- Tap "Send" +- Under "Recipient Address": + - If you already have the receiving address you are going to send to, + tap "Paste" + - If you are transferring to another Trust Wallet user, you can use the app's + QR code reader by tapping the square icon to the right of the "Paste" button. +- Under "Amount", enter the amount of SOL you want to send, or tap "Max" + to send **all** of the SOL in your wallet to the new address +- Tap "Next" to view a summary/confirmation page to review before submitting the transaction - - The real-time network transaction fee will be shown. This fee will be + - The real-time network transaction fee will be shown. This fee will be paid by the sending account in addition to the full amount transferred. - - If you chose to send all your tokens to the new address by selecting + - If you chose to send all your tokens to the new address by selecting "Max" under amount, the amount sent to the recipient will be the account - balance *minus* the current network transaction fee. - - **Make sure you entered the "To" address correctly and that it matches your + balance _minus_ the current network transaction fee. + - **Make sure you entered the "To" address correctly and that it matches your desired wallet's receiving address!** - - Tap "Send" to submit the transaction. The transaction will show as "Pending" - for a few seconds, and then will show as "Sent". +- Tap "Send" to submit the transaction. The transaction will show as "Pending" + for a few seconds, and then will show as "Sent". [Trust Wallet Official Docs: Sending Cryptocurrencies](https://community.trustwallet.com/t/sending-cryptocurrencies/65) ## Using Multiple Wallet Addresses + Trust Wallet allows you to create multiple wallets, each of which is -secured by a different set of random seed words. If you want to use more than +secured by a different set of random seed words. If you want to use more than one Solana address, follow these steps. #### Create an additional wallet - - In the main page of the Trust Wallet App, tap "Settings" in the bottom-right - corner. - - Tap "Wallets" - - Tap "+" to create a new Wallet - - Follow the steps above to add SOL tokens to your new wallet -*Note: In the "Wallets" page under "Settings" you can re-name each of your wallets. - Consider giving each a descriptive name if you are planning to use your wallets - for different purposes, so you don't mistake one for the other.* +- In the main page of the Trust Wallet App, tap "Settings" in the bottom-right + corner. +- Tap "Wallets" +- Tap "+" to create a new Wallet +- Follow the steps above to add SOL tokens to your new wallet + +_Note: In the "Wallets" page under "Settings" you can re-name each of your wallets. +Consider giving each a descriptive name if you are planning to use your wallets +for different purposes, so you don't mistake one for the other._ #### Transferring SOL between your wallets + Just like you can transfer SOL to another party, you can transfer SOL between wallets that you own. - - Copy the receive address of your **newly created** wallet. - - Make sure your **new** wallet is selected by going to - "Settings" --> "Wallets", then tap on the name of your new wallet. - - Tap the "Solana" icon. - - Tap "Receive" then tap "Copy". 
- - Select your previous/original wallet which already has some SOL by going to - "Settings" --> "Wallets", then tap on the name of your **original** wallet. - - Now follow the same process for [sending SOL tokens](#sending-sol-tokens) +- Copy the receive address of your **newly created** wallet. + - Make sure your **new** wallet is selected by going to + "Settings" --> "Wallets", then tap on the name of your new wallet. + - Tap the "Solana" icon. + - Tap "Receive" then tap "Copy". +- Select your previous/original wallet which already has some SOL by going to + "Settings" --> "Wallets", then tap on the name of your **original** wallet. + - Now follow the same process for [sending SOL tokens](#sending-sol-tokens) using your **new** wallet's receiving address as the address in the "To" field when you make the transfer. ## Troubleshooting + If you are having trouble setting up your Trust Wallet app, check out their - [Community Help Center](https://community.trustwallet.com/c/helpcenter) +[Community Help Center](https://community.trustwallet.com/c/helpcenter) ## Support diff --git a/docs/static/.nojekyll b/docs/static/.nojekyll new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/static/img/android-chrome-192x192.png b/docs/static/img/android-chrome-192x192.png new file mode 100644 index 0000000000..b8222a9b6b Binary files /dev/null and b/docs/static/img/android-chrome-192x192.png differ diff --git a/docs/static/img/android-chrome-512x512.png b/docs/static/img/android-chrome-512x512.png new file mode 100644 index 0000000000..598b8a1722 Binary files /dev/null and b/docs/static/img/android-chrome-512x512.png differ diff --git a/docs/static/img/apple-touch-icon.png b/docs/static/img/apple-touch-icon.png new file mode 100644 index 0000000000..282b79dee8 Binary files /dev/null and b/docs/static/img/apple-touch-icon.png differ diff --git a/docs/static/img/dark-mark-white.inline.svg b/docs/static/img/dark-mark-white.inline.svg new file mode 100644 index 0000000000..fac7a4fd11 --- /dev/null +++ b/docs/static/img/dark-mark-white.inline.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/docs/static/img/favicon-16x16.png b/docs/static/img/favicon-16x16.png new file mode 100644 index 0000000000..b1cb5f9548 Binary files /dev/null and b/docs/static/img/favicon-16x16.png differ diff --git a/docs/static/img/favicon-32x32.png b/docs/static/img/favicon-32x32.png new file mode 100644 index 0000000000..7580b1776e Binary files /dev/null and b/docs/static/img/favicon-32x32.png differ diff --git a/docs/static/img/favicon.ico b/docs/static/img/favicon.ico new file mode 100644 index 0000000000..86b9f3ce33 Binary files /dev/null and b/docs/static/img/favicon.ico differ diff --git a/docs/static/img/logo-horizontal-dark.svg b/docs/static/img/logo-horizontal-dark.svg new file mode 100644 index 0000000000..b17305fa3d --- /dev/null +++ b/docs/static/img/logo-horizontal-dark.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/docs/static/img/logo-horizontal.svg b/docs/static/img/logo-horizontal.svg new file mode 100644 index 0000000000..2cab618589 --- /dev/null +++ b/docs/static/img/logo-horizontal.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/docs/static/img/logo.svg b/docs/static/img/logo.svg new file mode 100644 index 0000000000..4e78f61097 --- /dev/null +++ b/docs/static/img/logo.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/docs/static/img/spiral.svg b/docs/static/img/spiral.svg new file mode 100644 index 0000000000..03a96552c5 --- /dev/null +++ b/docs/static/img/spiral.svg @@ -0,0 +1,206 @@ + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/theme/book.js b/docs/theme/book.js deleted file mode 100644 index 8936841986..0000000000 --- a/docs/theme/book.js +++ /dev/null @@ -1,600 +0,0 @@ -"use strict"; - -// Fix back button cache problem -window.onunload = function () { }; - -// Global variable, shared between modules -function playpen_text(playpen) { - let code_block = playpen.querySelector("code"); - - if (window.ace && code_block.classList.contains("editable")) { - let editor = window.ace.edit(code_block); - return editor.getValue(); - } else { - return code_block.textContent; - } -} - -(function codeSnippets() { - // Hide Rust code lines prepended with a specific character - var hiding_character = "#"; - - function fetch_with_timeout(url, options, timeout = 6000) { - return Promise.race([ - fetch(url, options), - new Promise((_, reject) => setTimeout(() => reject(new Error('timeout')), timeout)) - ]); - } - - var playpens = Array.from(document.querySelectorAll(".playpen")); - if (playpens.length > 0) { - fetch_with_timeout("https://play.rust-lang.org/meta/crates", { - headers: { - 'Content-Type': "application/json", - }, - method: 'POST', - mode: 'cors', - }) - .then(response => response.json()) - .then(response => { - // get list of crates available in the rust playground - let playground_crates = response.crates.map(item => item["id"]); - playpens.forEach(block => handle_crate_list_update(block, playground_crates)); - }); - } - - function handle_crate_list_update(playpen_block, playground_crates) { - // update the play buttons after receiving the response - update_play_button(playpen_block, playground_crates); - - // and install on change listener to dynamically update ACE editors - if (window.ace) { - let code_block = playpen_block.querySelector("code"); - if (code_block.classList.contains("editable")) { - let editor = window.ace.edit(code_block); - editor.addEventListener("change", function (e) { - update_play_button(playpen_block, playground_crates); - }); - } - } - } - - // updates the visibility of play button based on `no_run` class and - // used crates vs ones available on http://play.rust-lang.org - function update_play_button(pre_block, playground_crates) { - var play_button = pre_block.querySelector(".play-button"); - - // skip if code is `no_run` - if (pre_block.querySelector('code').classList.contains("no_run")) { - play_button.classList.add("hidden"); - return; - } - - // get list of `extern crate`'s from snippet - var txt = playpen_text(pre_block); - var re = /extern\s+crate\s+([a-zA-Z_0-9]+)\s*;/g; - var snippet_crates = []; - var item; - while (item = re.exec(txt)) { - snippet_crates.push(item[1]); - } - - // check if all used crates are available on play.rust-lang.org - var all_available = snippet_crates.every(function (elem) { - return playground_crates.indexOf(elem) > -1; - }); - - if (all_available) { - play_button.classList.remove("hidden"); - } else { - play_button.classList.add("hidden"); - } - } - - function run_rust_code(code_block) { - var result_block = code_block.querySelector(".result"); - if (!result_block) { - result_block = document.createElement('code'); - 
result_block.className = 'result hljs language-bash'; - - code_block.append(result_block); - } - - let text = playpen_text(code_block); - - var params = { - version: "stable", - optimize: "0", - code: text - }; - - if (text.indexOf("#![feature") !== -1) { - params.version = "nightly"; - } - - result_block.innerText = "Running..."; - - fetch_with_timeout("https://play.rust-lang.org/evaluate.json", { - headers: { - 'Content-Type': "application/json", - }, - method: 'POST', - mode: 'cors', - body: JSON.stringify(params) - }) - .then(response => response.json()) - .then(response => result_block.innerText = response.result) - .catch(error => result_block.innerText = "Playground Communication: " + error.message); - } - - // Syntax highlighting Configuration - hljs.configure({ - tabReplace: ' ', // 4 spaces - languages: [], // Languages used for auto-detection - }); - - if (window.ace) { - // language-rust class needs to be removed for editable - // blocks or highlightjs will capture events - Array - .from(document.querySelectorAll('code.editable')) - .forEach(function (block) { block.classList.remove('language-rust'); }); - - Array - .from(document.querySelectorAll('code:not(.editable)')) - .forEach(function (block) { hljs.highlightBlock(block); }); - } else { - Array - .from(document.querySelectorAll('code')) - .forEach(function (block) { hljs.highlightBlock(block); }); - } - - // Adding the hljs class gives code blocks the color css - // even if highlighting doesn't apply - Array - .from(document.querySelectorAll('code')) - .forEach(function (block) { block.classList.add('hljs'); }); - - Array.from(document.querySelectorAll("code.language-rust")).forEach(function (block) { - - var code_block = block; - var pre_block = block.parentNode; - // hide lines - var lines = code_block.innerHTML.split("\n"); - var first_non_hidden_line = false; - var lines_hidden = false; - var trimmed_line = ""; - - for (var n = 0; n < lines.length; n++) { - trimmed_line = lines[n].trim(); - if (trimmed_line[0] == hiding_character && trimmed_line[1] != hiding_character) { - if (first_non_hidden_line) { - lines[n] = "" + "\n" + lines[n].replace(/(\s*)# ?/, "$1") + ""; - } - else { - lines[n] = "" + lines[n].replace(/(\s*)# ?/, "$1") + "\n" + ""; - } - lines_hidden = true; - } - else if (first_non_hidden_line) { - lines[n] = "\n" + lines[n]; - } - else { - first_non_hidden_line = true; - } - if (trimmed_line[0] == hiding_character && trimmed_line[1] == hiding_character) { - lines[n] = lines[n].replace("##", "#") - } - } - code_block.innerHTML = lines.join(""); - - // If no lines were hidden, return - if (!lines_hidden) { return; } - - var buttons = document.createElement('div'); - buttons.className = 'buttons'; - buttons.innerHTML = ""; - - // add expand button - pre_block.insertBefore(buttons, pre_block.firstChild); - - pre_block.querySelector('.buttons').addEventListener('click', function (e) { - if (e.target.classList.contains('fa-expand')) { - var lines = pre_block.querySelectorAll('span.hidden'); - - e.target.classList.remove('fa-expand'); - e.target.classList.add('fa-compress'); - e.target.title = 'Hide lines'; - e.target.setAttribute('aria-label', e.target.title); - - Array.from(lines).forEach(function (line) { - line.classList.remove('hidden'); - line.classList.add('unhidden'); - }); - } else if (e.target.classList.contains('fa-compress')) { - var lines = pre_block.querySelectorAll('span.unhidden'); - - e.target.classList.remove('fa-compress'); - e.target.classList.add('fa-expand'); - e.target.title = 'Show 
hidden lines'; - e.target.setAttribute('aria-label', e.target.title); - - Array.from(lines).forEach(function (line) { - line.classList.remove('unhidden'); - line.classList.add('hidden'); - }); - } - }); - }); - - Array.from(document.querySelectorAll('pre code')).forEach(function (block) { - var pre_block = block.parentNode; - if (!pre_block.classList.contains('playpen')) { - var buttons = pre_block.querySelector(".buttons"); - if (!buttons) { - buttons = document.createElement('div'); - buttons.className = 'buttons'; - pre_block.insertBefore(buttons, pre_block.firstChild); - } - - var clipButton = document.createElement('button'); - clipButton.className = 'fa fa-copy clip-button'; - clipButton.title = 'Copy to clipboard'; - clipButton.setAttribute('aria-label', clipButton.title); - clipButton.innerHTML = ''; - - buttons.insertBefore(clipButton, buttons.firstChild); - } - }); - - // Process playpen code blocks - Array.from(document.querySelectorAll(".playpen")).forEach(function (pre_block) { - // Add play button - var buttons = pre_block.querySelector(".buttons"); - if (!buttons) { - buttons = document.createElement('div'); - buttons.className = 'buttons'; - pre_block.insertBefore(buttons, pre_block.firstChild); - } - - var runCodeButton = document.createElement('button'); - runCodeButton.className = 'fa fa-play play-button'; - runCodeButton.hidden = true; - runCodeButton.title = 'Run this code'; - runCodeButton.setAttribute('aria-label', runCodeButton.title); - - var copyCodeClipboardButton = document.createElement('button'); - copyCodeClipboardButton.className = 'fa fa-copy clip-button'; - copyCodeClipboardButton.innerHTML = ''; - copyCodeClipboardButton.title = 'Copy to clipboard'; - copyCodeClipboardButton.setAttribute('aria-label', copyCodeClipboardButton.title); - - buttons.insertBefore(runCodeButton, buttons.firstChild); - buttons.insertBefore(copyCodeClipboardButton, buttons.firstChild); - - runCodeButton.addEventListener('click', function (e) { - run_rust_code(pre_block); - }); - - let code_block = pre_block.querySelector("code"); - if (window.ace && code_block.classList.contains("editable")) { - var undoChangesButton = document.createElement('button'); - undoChangesButton.className = 'fa fa-history reset-button'; - undoChangesButton.title = 'Undo changes'; - undoChangesButton.setAttribute('aria-label', undoChangesButton.title); - - buttons.insertBefore(undoChangesButton, buttons.firstChild); - - undoChangesButton.addEventListener('click', function () { - let editor = window.ace.edit(code_block); - editor.setValue(editor.originalCode); - editor.clearSelection(); - }); - } - }); -})(); - -(function themes() { - var html = document.querySelector('html'); - var themeToggleButton = document.getElementById('theme-toggle'); - var themePopup = document.getElementById('theme-list'); - var themeColorMetaTag = document.querySelector('meta[name="theme-color"]'); - var stylesheets = { - ayuHighlight: document.querySelector("[href$='ayu-highlight.css']"), - tomorrowNight: document.querySelector("[href$='tomorrow-night.css']"), - highlight: document.querySelector("[href$='highlight.css']"), - }; - - function showThemes() { - themePopup.style.display = 'block'; - themeToggleButton.setAttribute('aria-expanded', true); - themePopup.querySelector("button#" + document.body.className).focus(); - } - - function hideThemes() { - themePopup.style.display = 'none'; - themeToggleButton.setAttribute('aria-expanded', false); - themeToggleButton.focus(); - } - - function set_theme(theme) { - let ace_theme; - 
- if (theme == 'coal' || theme == 'navy') { - stylesheets.ayuHighlight.disabled = true; - stylesheets.tomorrowNight.disabled = false; - stylesheets.highlight.disabled = true; - - ace_theme = "ace/theme/tomorrow_night"; - } else if (theme == 'ayu') { - stylesheets.ayuHighlight.disabled = false; - stylesheets.tomorrowNight.disabled = true; - stylesheets.highlight.disabled = true; - - ace_theme = "ace/theme/tomorrow_night"; - } else { - stylesheets.ayuHighlight.disabled = true; - stylesheets.tomorrowNight.disabled = true; - stylesheets.highlight.disabled = false; - - ace_theme = "ace/theme/dawn"; - } - - setTimeout(function () { - themeColorMetaTag.content = getComputedStyle(document.body).backgroundColor; - }, 1); - - if (window.ace && window.editors) { - window.editors.forEach(function (editor) { - editor.setTheme(ace_theme); - }); - } - - var previousTheme; - try { previousTheme = localStorage.getItem('mdbook-theme'); } catch (e) { } - if (previousTheme === null || previousTheme === undefined) { previousTheme = 'light'; } - - try { localStorage.setItem('mdbook-theme', theme); } catch (e) { } - - document.body.className = theme; - html.classList.remove(previousTheme); - html.classList.add(theme); - } - - // Set theme - var theme; - try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } - if (theme === null || theme === undefined) { theme = 'light'; } - - set_theme(theme); - - // themeToggleButton.addEventListener('click', function () { - // if (themePopup.style.display === 'block') { - // hideThemes(); - // } else { - // showThemes(); - // } - // }); - - themePopup.addEventListener('click', function (e) { - var theme = e.target.id || e.target.parentElement.id; - set_theme(theme); - }); - - themePopup.addEventListener('focusout', function(e) { - // e.relatedTarget is null in Safari and Firefox on macOS (see workaround below) - if (!!e.relatedTarget && !themeToggleButton.contains(e.relatedTarget) && !themePopup.contains(e.relatedTarget)) { - hideThemes(); - } - }); - - // Should not be needed, but it works around an issue on macOS & iOS: https://github.com/rust-lang-nursery/mdBook/issues/628 - document.addEventListener('click', function(e) { - if (themePopup.style.display === 'block' && !themeToggleButton.contains(e.target) && !themePopup.contains(e.target)) { - hideThemes(); - } - }); - - document.addEventListener('keydown', function (e) { - if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; } - if (!themePopup.contains(e.target)) { return; } - - switch (e.key) { - case 'Escape': - e.preventDefault(); - hideThemes(); - break; - case 'ArrowUp': - e.preventDefault(); - var li = document.activeElement.parentElement; - if (li && li.previousElementSibling) { - li.previousElementSibling.querySelector('button').focus(); - } - break; - case 'ArrowDown': - e.preventDefault(); - var li = document.activeElement.parentElement; - if (li && li.nextElementSibling) { - li.nextElementSibling.querySelector('button').focus(); - } - break; - case 'Home': - e.preventDefault(); - themePopup.querySelector('li:first-child button').focus(); - break; - case 'End': - e.preventDefault(); - themePopup.querySelector('li:last-child button').focus(); - break; - } - }); -})(); - -(function sidebar() { - var html = document.querySelector("html"); - var sidebar = document.getElementById("sidebar"); - var sidebarLinks = document.querySelectorAll('#sidebar a'); - var sidebarToggleButton = document.getElementById("sidebar-toggle"); - var firstContact = null; - - function showSidebar() { - 
html.classList.remove('sidebar-hidden') - html.classList.add('sidebar-visible'); - Array.from(sidebarLinks).forEach(function (link) { - link.setAttribute('tabIndex', 0); - }); - sidebarToggleButton.setAttribute('aria-expanded', true); - sidebar.setAttribute('aria-hidden', false); - try { localStorage.setItem('mdbook-sidebar', 'visible'); } catch (e) { } - } - - function hideSidebar() { - html.classList.remove('sidebar-visible') - html.classList.add('sidebar-hidden'); - Array.from(sidebarLinks).forEach(function (link) { - link.setAttribute('tabIndex', -1); - }); - sidebarToggleButton.setAttribute('aria-expanded', false); - sidebar.setAttribute('aria-hidden', true); - try { localStorage.setItem('mdbook-sidebar', 'hidden'); } catch (e) { } - } - - // Toggle sidebar - sidebarToggleButton.addEventListener('click', function sidebarToggle() { - if (html.classList.contains("sidebar-hidden")) { - showSidebar(); - } else if (html.classList.contains("sidebar-visible")) { - hideSidebar(); - } else { - if (getComputedStyle(sidebar)['transform'] === 'none') { - hideSidebar(); - } else { - showSidebar(); - } - } - }); - - document.addEventListener('touchstart', function (e) { - firstContact = { - x: e.touches[0].clientX, - time: Date.now() - }; - }, { passive: true }); - - document.addEventListener('touchmove', function (e) { - if (!firstContact) - return; - - var curX = e.touches[0].clientX; - var xDiff = curX - firstContact.x, - tDiff = Date.now() - firstContact.time; - - if (tDiff < 250 && Math.abs(xDiff) >= 150) { - if (xDiff >= 0 && firstContact.x < Math.min(document.body.clientWidth * 0.25, 300)) - showSidebar(); - else if (xDiff < 0 && curX < 300) - hideSidebar(); - - firstContact = null; - } - }, { passive: true }); - - // Scroll sidebar to current active section - var activeSection = sidebar.querySelector(".active"); - if (activeSection) { - sidebar.scrollTop = activeSection.offsetTop; - } -})(); - -(function chapterNavigation() { - document.addEventListener('keydown', function (e) { - if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; } - if (window.search && window.search.hasFocus()) { return; } - - switch (e.key) { - case 'ArrowRight': - e.preventDefault(); - var nextButton = document.querySelector('.nav-chapters.next'); - if (nextButton) { - window.location.href = nextButton.href; - } - break; - case 'ArrowLeft': - e.preventDefault(); - var previousButton = document.querySelector('.nav-chapters.previous'); - if (previousButton) { - window.location.href = previousButton.href; - } - break; - } - }); -})(); - -(function clipboard() { - var clipButtons = document.querySelectorAll('.clip-button'); - - function hideTooltip(elem) { - elem.firstChild.innerText = ""; - elem.className = 'fa fa-copy clip-button'; - } - - function showTooltip(elem, msg) { - elem.firstChild.innerText = msg; - elem.className = 'fa fa-copy tooltipped'; - } - - var clipboardSnippets = new Clipboard('.clip-button', { - text: function (trigger) { - hideTooltip(trigger); - let playpen = trigger.closest("pre"); - return playpen_text(playpen); - } - }); - - Array.from(clipButtons).forEach(function (clipButton) { - clipButton.addEventListener('mouseout', function (e) { - hideTooltip(e.currentTarget); - }); - }); - - clipboardSnippets.on('success', function (e) { - e.clearSelection(); - showTooltip(e.trigger, "Copied!"); - }); - - clipboardSnippets.on('error', function (e) { - showTooltip(e.trigger, "Clipboard error!"); - }); -})(); - -(function scrollToTop () { - var menuTitle = document.querySelector('.menu-title'); 
- - menuTitle.addEventListener('click', function () { - document.scrollingElement.scrollTo({ top: 0, behavior: 'smooth' }); - }); -})(); - -(function autoHideMenu() { - var menu = document.getElementById('menu-bar'); - - var previousScrollTop = document.scrollingElement.scrollTop; - - document.addEventListener('scroll', function () { - if (menu.classList.contains('folded') && document.scrollingElement.scrollTop < previousScrollTop) { - menu.classList.remove('folded'); - } else if (!menu.classList.contains('folded') && document.scrollingElement.scrollTop > previousScrollTop) { - menu.classList.add('folded'); - } - - if (!menu.classList.contains('bordered') && document.scrollingElement.scrollTop > 0) { - menu.classList.add('bordered'); - } - - if (menu.classList.contains('bordered') && document.scrollingElement.scrollTop === 0) { - menu.classList.remove('bordered'); - } - - previousScrollTop = document.scrollingElement.scrollTop; - }, { passive: true }); -})(); diff --git a/docs/theme/css/chrome.css b/docs/theme/css/chrome.css deleted file mode 100644 index 5c3096a9ac..0000000000 --- a/docs/theme/css/chrome.css +++ /dev/null @@ -1,524 +0,0 @@ -/* CSS for UI elements (a.k.a. chrome) */ -@import 'variables.css'; - -::-webkit-scrollbar { - background: var(--bg); -} -::-webkit-scrollbar-thumb { - background: var(--scrollbar); -} - -#searchresults a, -a:visited, -a > .hljs { - color: #000; -} - -#searchresults a:hover { - text-decoration: underline; -} - -.content a { - color: #000; -} - -/* Menu Bar */ - -#menu-bar { - position: -webkit-sticky; - position: sticky; - top: 0; - padding: 0 15px; - padding: 0; - z-index: 101; - width: 100%; - /* margin: auto calc(0px - var(--page-padding)); */ -} -#menu-bar > #menu-bar-sticky-container { - display: flex; - flex-wrap: wrap; - background-color: var(--bg); - border-bottom-color: var(--bg); - border-bottom-width: 1px; - border-bottom-style: solid; -} -.js #menu-bar > #menu-bar-sticky-container { - transition: transform 0.3s; -} -#menu-bar.bordered > #menu-bar-sticky-container { - border-bottom-color: var(--table-border-color); -} -#menu-bar i, #menu-bar .icon-button { - position: relative; - padding: 0 8px; - z-index: 10; - line-height: 50px; - cursor: pointer; - transition: color 0.5s; -} -@media only screen and (max-width: 420px) { - #menu-bar i, #menu-bar .icon-button { - padding: 0 5px; - } -} - -.icon-button { - border: none; - background: none; - padding: 0; - color: inherit; -} -.icon-button i { - margin: 0; -} - -#print-button { - margin: 0 15px; -} - -html:not(.sidebar-visible) #menu-bar:not(:hover).folded > #menu-bar-sticky-container { - transform: translateY(-60px); -} - -.left-buttons { - display: flex; - margin: 0 5px; -} -.no-js .left-buttons { - display: none; -} - -.menu-title { - display: inline-block; - font-weight: 200; - font-size: 20px; - line-height: 50px; - text-align: center; - margin: 0; - flex: 1; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; -} -.js .menu-title { - cursor: pointer; -} - -.menu-bar, -.menu-bar:visited, -.nav-chapters, -.nav-chapters:visited, -.mobile-nav-chapters, -.mobile-nav-chapters:visited, -.menu-bar .icon-button, -.menu-bar a i { - color: var(--icons); -} - -.menu-bar i:hover, -.menu-bar .icon-button:hover, -.nav-chapters:hover, -.mobile-nav-chapters i:hover { - color: var(--icons-hover); -} - -/* Nav Icons */ - -.nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - - position: fixed; - top: 50px; /* Height of menu-bar */ - bottom: 0; - margin: 0; - 
max-width: 150px; - min-width: 90px; - - display: flex; - justify-content: center; - align-content: center; - flex-direction: column; - - transition: color 0.5s; -} - -.nav-chapters:hover { text-decoration: none; } - -.nav-wrapper { - margin-top: 50px; - display: none; -} - -.mobile-nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - width: 90px; - border-radius: 5px; - background-color: var(--sidebar-bg); -} - -.previous { - float: left; -} - -.next { - float: right; - right: 15px; -} - -@media only screen and (max-width: 1080px) { - .nav-wide-wrapper { display: none; } - .nav-wrapper { display: block; } -} - -@media only screen and (max-width: 1380px) { - .sidebar-visible .nav-wide-wrapper { display: none; } - .sidebar-visible .nav-wrapper { display: block; } -} - -/* Inline code */ - -:not(pre) > .hljs { - display: inline-block; - vertical-align: middle; - padding: 0.1em 0.3em; - border-radius: 3px; - color: var(--inline-code-color); -} - -a:hover > .hljs { - text-decoration: underline; -} - -pre { - position: relative; -} -pre > .buttons { - position: absolute; - z-index: 100; - right: 5px; - top: 5px; - - color: var(--sidebar-fg); - cursor: pointer; -} -pre > .buttons :hover { - color: var(--sidebar-active); -} -pre > .buttons i { - margin-left: 8px; -} -pre > .buttons button { - color: inherit; - background: transparent; - border: none; - cursor: inherit; -} -pre > .result { - margin-top: 10px; -} - -/* Search */ - -#searchresults a { - text-decoration: none; -} - -mark { - border-radius: 2px; - padding: 0 3px 1px 3px; - margin: 0 -3px -1px -3px; - background-color: var(--search-mark-bg); - transition: background-color 300ms linear; - cursor: pointer; -} - -mark.fade-out { - background-color: rgba(0,0,0,0) !important; - cursor: auto; -} - -.searchbar-outer { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); -} - -#searchbar { - width: 100%; - margin: 5px auto 0px auto; - padding: 10px 16px; - transition: box-shadow 300ms ease-in-out; - border: 1px solid var(--searchbar-border-color); - border-radius: 3px; - background-color: var(--searchbar-bg); - color: var(--searchbar-fg); -} -#searchbar:focus, -#searchbar.active { - box-shadow: 0 0 3px var(--searchbar-shadow-color); -} - -.searchresults-header { - font-weight: normal; - font-size: 1em; - padding: 18px 28px 0 28px; - color: var(--searchresults-header-fg); -} - -.searchresults-outer { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); - border-bottom: 1px dashed var(--searchresults-border-color); -} - -ul#searchresults { - list-style: none; - padding-left: 28px; -} -ul#searchresults li { - margin: 10px 0px; - padding: 2px; - border-radius: 2px; -} -ul#searchresults li.focus { - background-color: var(--searchresults-li-bg); -} -ul#searchresults span.teaser { - display: block; - clear: both; - margin: 5px 0 0 20px; - font-size: 0.8em; -} -ul#searchresults span.teaser em { - font-weight: bold; - font-style: normal; -} - -/* Sidebar */ - -.sidebar { - position: fixed; - left: 0; - top: 0; - bottom: 0; - width: var(--sidebar-width); - overflow-y: auto; - font-size: 0.875em; - box-sizing: border-box; - -webkit-overflow-scrolling: touch; - overscroll-behavior-y: contain; - background-color: var(--sidebar-bg); - color: var(--sidebar-fg); -} -.sidebar img { - display: block; - max-width: 70%; - margin: 20px auto; -} -.js .sidebar { - transition: transform 0.3s; /* Animation: slide away */ -} -.sidebar code { - line-height: 2em; -} -.sidebar-hidden 
.sidebar { - transform: translateX(calc(0px - var(--sidebar-width))); -} -.sidebar::-webkit-scrollbar { - background: var(--sidebar-bg); -} -.sidebar::-webkit-scrollbar-thumb { - background: var(--scrollbar); -} - -.sidebar-visible .page-wrapper { - transform: translateX(var(--sidebar-width)); -} -@media only screen and (min-width: 620px) { - .sidebar-visible .page-wrapper { - transform: none; - margin-left: var(--sidebar-width); - } -} - -.chapter { - list-style: none outside none; - padding-left: 0; - line-height: 2.2em; - margin-top: 0; -} -.chapter li { - color: var(--sidebar-non-existant); -} -.chapter li a { - color: var(--sidebar-fg); - display: block; - padding: 0; - text-decoration: none; - padding-left: 25px; - font-size: 13px; - padding-top: 0.3em; - padding-bottom: 0.3em; - font-weight: normal; -} -.chapter li a strong { - font-weight: normal; -} -.chapter li a:hover { color: var(--sidebar-active); - background: #00A670; } -.chapter li .active { - /* Animate color change */ - color: var(--sidebar-active); - background: #00A670; -} -.content a:hover { - color: #000; - background: none; -} - -.spacer { - width: 100%; - height: 3px; - margin: 5px 0px; -} -.chapter .spacer { - background-color: var(--sidebar-spacer); -} - -@media (-moz-touch-enabled: 1), (pointer: coarse) { - /* .chapter li a { padding: 5px 0; } */ - .spacer { margin: 10px 0; } -} - -.section { - list-style: none outside none; - padding-left: 20px; - line-height: 1.9em; -} - -/* Theme Menu Popup */ - -.theme-popup { - position: absolute; - left: 10px; - top: 50px; - z-index: 1000; - border-radius: 4px; - font-size: 0.7em; - color: var(--fg); - background: var(--theme-popup-bg); - border: 1px solid var(--theme-popup-border); - margin: 0; - padding: 0; - list-style: none; - display: none; -} -.theme-popup .default { - color: var(--icons); -} -.theme-popup .theme { - width: 100%; - border: 0; - margin: 0; - padding: 2px 10px; - line-height: 25px; - white-space: nowrap; - text-align: left; - cursor: pointer; - color: inherit; - background: inherit; - font-size: inherit; -} -.theme-popup .theme:hover { - background-color: var(--theme-hover); -} -.theme-popup .theme:hover:first-child, -.theme-popup .theme:hover:last-child { - border-top-left-radius: inherit; - border-top-right-radius: inherit; -} - -.content p { - line-height: 1.6; - margin-top: 0; - padding: 0 28px; -} - -.content h1 { - font-size: 25px; - font-weight: 300; - padding: 0 28px; - padding-top: 0.5em; - padding-bottom: 0.5em; - margin-bottom: 21px; - margin-top: 2em; - border-top: 1px solid #e5e5e5; - border-bottom: 1px solid #e5e5e5; - background-color: #fff; - font-family: Poppins, sans-serif; - -} - -.content h2 { - font-family: Poppins, sans-serif; - font-size: 20px; - font-weight: 300; - margin-top: 2em; - margin-bottom: 0; - padding: 0 28px; - padding-top: 1.2em; - padding-bottom: 1.2em; -} - -.content h3, h4, h5 { - font-size: 15px; - margin-top: 2.5em; - margin-bottom: 0.8em; - padding: 0 28px; -} - -.content code { - background-color: rgba(0,0,0,0.05); - padding: 3px; - border-radius: 3px; - font-family: Consolas, Menlo, Monaco, "Lucida Console", "Liberation Mono", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace, serif; - font-size: 13px; - line-height: 1.5; - color: #333; -} - -.language-ini.hljs, -.language-manpage.hljs, -.language-sh.hljs, -.language-bash.hljs { - background-color: #262B26; - color: #fff; - margin: 0; - padding-top: 2em; - padding-bottom: 2em; - padding: 2em 28px; -} - -.content table { - margin-bottom: 
1em; -} -.content ul { - padding: 0 28px; - padding-left: 43px; -} -.content ul li { - line-height: 1.6; - margin-top: 0; -} -.content ul p { - padding: 0; - margin: 0; -} -.content pre { - padding: 0 28px; -} diff --git a/docs/theme/css/general.css b/docs/theme/css/general.css deleted file mode 100644 index 079db56191..0000000000 --- a/docs/theme/css/general.css +++ /dev/null @@ -1,155 +0,0 @@ -/* Base styles and content styles */ - -@import 'variables.css'; - -html { - font-family: Lato, 'Helvetica Neue', 'Arial', sans-serif; - color: var(--fg); - background-color: var(--bg); - text-size-adjust: none; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} - -body { - margin: 0; - font-size: 1rem; - overflow-x: hidden; - font-family: Lato, 'Helvetica Neue', 'Arial', sans-serif; - font-size: 16px; - font-weight: 300; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} - -code { - font-family: Consolas, Menlo, Monaco, "Lucida Console", "Liberation Mono", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace, serif; - font-size: 13px; /* please adjust the ace font size accordingly in editor.js */ - line-height: 1.5; -} - -.left { float: left; } -.right { float: right; } -.hidden { display: none; } -.play-button.hidden { display: none; } - -h1, h2, h3 { margin-top: 2.5em; } -h4, h5 { margin-top: 2em; } - -.header + .header h3, -.header + .header h4, -.header + .header h5 { - margin-top: 1em; -} - -a.header:target h1:before, -a.header:target h2:before, -a.header:target h3:before, -a.header:target h4:before { - display: inline-block; - content: "»"; - margin-left: -30px; - width: 30px; -} - -.page { - outline: 0; - /* padding: 0 var(--page-padding); */ -} -.page-wrapper { - box-sizing: border-box; -} -.js .page-wrapper { - transition: margin-left 0.3s ease, transform 0.3s ease; /* Animation: slide away */ -} - -.content { - overflow-y: auto; - - padding-bottom: 50px; -} -.content main { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); -} -.content a:hover { text-decoration: underline; } -.content img { max-width: 100%; } -.content .header:link, -.content .header:visited { - color: var(--fg); -} -.content .header:link, -.content .header:visited:hover { - text-decoration: none; -} - -table { - margin: 0 auto; - border-collapse: collapse; -} -table td { - padding: 3px 20px; - border: 1px var(--table-border-color) solid; -} -table thead { - background: var(--table-header-bg); -} -table thead td { - font-weight: 700; - border: none; -} -table thead tr { - border: 1px var(--table-header-bg) solid; -} -/* Alternate background colors for rows */ -table tbody tr:nth-child(2n) { - background: var(--table-alternate-bg); -} - - -blockquote { - margin: 20px 0; - padding: 0 20px; - color: var(--fg); - background-color: var(--quote-bg); - border-top: .1em solid var(--quote-border); - border-bottom: .1em solid var(--quote-border); -} - - -:not(.footnote-definition) + .footnote-definition, -.footnote-definition + :not(.footnote-definition) { - margin-top: 2em; -} -.footnote-definition { - font-size: 0.9em; - margin: 0.5em 0; -} -.footnote-definition p { - display: inline; -} - -.tooltiptext { - position: absolute; - visibility: hidden; - color: #fff; - background-color: #333; - transform: translateX(-50%); /* Center by moving tooltip 50% of its width left */ - left: -8px; /* Half of the width of the icon */ - top: -35px; - font-size: 0.8em; - text-align: center; - border-radius: 6px; - padding: 5px 8px; - 
margin: 5px; - z-index: 1000; -} -.tooltipped .tooltiptext { - visibility: visible; -} -*:focus, -*:active, -*:hover { - outline: none; -} diff --git a/docs/theme/css/print.css b/docs/theme/css/print.css deleted file mode 100644 index 5e690f7559..0000000000 --- a/docs/theme/css/print.css +++ /dev/null @@ -1,54 +0,0 @@ - -#sidebar, -#menu-bar, -.nav-chapters, -.mobile-nav-chapters { - display: none; -} - -#page-wrapper.page-wrapper { - transform: none; - margin-left: 0px; - overflow-y: initial; -} - -#content { - max-width: none; - margin: 0; - padding: 0; -} - -.page { - overflow-y: initial; -} - -code { - background-color: #666666; - border-radius: 5px; - - /* Force background to be printed in Chrome */ - -webkit-print-color-adjust: exact; -} - -pre > .buttons { - z-index: 2; -} - -a, a:visited, a:active, a:hover { - color: #4183c4; - text-decoration: none; -} - -h1, h2, h3, h4, h5, h6 { - page-break-inside: avoid; - page-break-after: avoid; -} - -pre, code { - page-break-inside: avoid; - white-space: pre-wrap; -} - -.fa { - display: none !important; -} diff --git a/docs/theme/css/variables.css b/docs/theme/css/variables.css deleted file mode 100644 index 9f3e57e31d..0000000000 --- a/docs/theme/css/variables.css +++ /dev/null @@ -1,210 +0,0 @@ - -/* Globals */ - -:root { - --sidebar-width: 300px; - /* --page-padding: 15px; */ - --content-max-width: 100%; -} - -/* Themes */ - -.ayu { - --bg: hsl(210, 25%, 8%); - --fg: #c5c5c5; - - --sidebar-bg: #14191f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #5c6773; - --sidebar-active: #ffb454; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #0096cf; - - --inline-code-color: #ffb454; - - --theme-popup-bg: #14191f; - --theme-popup-border: #5c6773; - --theme-hover: #191f26; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --table-border-color: hsl(210, 25%, 13%); - --table-header-bg: hsl(210, 25%, 28%); - --table-alternate-bg: hsl(210, 25%, 11%); - - --searchbar-border-color: #848484; - --searchbar-bg: #424242; - --searchbar-fg: #fff; - --searchbar-shadow-color: #d4c89f; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #252932; - --search-mark-bg: #e3b171; -} - -.coal { - --bg: hsl(200, 7%, 8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #fff; - --sidebar-non-existant: #505254; - --sidebar-active: #fff; - --sidebar-spacer: #393939; - - --scrollbar: var(--sidebar-fg); - - --icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #141617; - --theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; -} - -.light { - --bg: #f7f7f7; - --fg: #333333; - - --sidebar-bg: #050505; - --sidebar-fg: #fff; - --sidebar-non-existant: #aaaaaa; - --sidebar-active: #fff; - --sidebar-spacer: #f4f4f4; - - --scrollbar: #cccccc; - - --icons: #cccccc; - --icons-hover: #333333; - - --links: #000; - - --inline-code-color: #6e6b5e; - - --theme-popup-bg: #fafafa; - --theme-popup-border: 
#cccccc; - --theme-hover: #e6e6e6; - - --quote-bg: hsl(197, 37%, 96%); - --quote-border: hsl(197, 37%, 91%); - - --table-border-color: hsl(0, 0%, 95%); - --table-header-bg: hsl(0, 0%, 80%); - --table-alternate-bg: hsl(0, 0%, 97%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #e4f2fe; - --search-mark-bg: #a2cff5; -} - -.navy { - --bg: hsl(226, 23%, 11%); - --fg: #bcbdd0; - - --sidebar-bg: #282d3f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505274; - --sidebar-active: #2b79a2; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #161923; - --theme-popup-border: #737480; - --theme-hover: #282e40; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --table-border-color: hsl(226, 23%, 16%); - --table-header-bg: hsl(226, 23%, 31%); - --table-alternate-bg: hsl(226, 23%, 14%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #aeaec6; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #5f5f71; - --searchresults-border-color: #5c5c68; - --searchresults-li-bg: #242430; - --search-mark-bg: #a2cff5; -} - -.rust { - --bg: hsl(60, 9%, 87%); - --fg: #262625; - - --sidebar-bg: #3b2e2a; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505254; - --sidebar-active: #e69f67; - --sidebar-spacer: #45373a; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #262625; - - --links: #2b79a2; - - --inline-code-color: #6e6b5e; - - --theme-popup-bg: #e1e1db; - --theme-popup-border: #b38f6b; - --theme-hover: #99908a; - - --quote-bg: hsl(60, 5%, 75%); - --quote-border: hsl(60, 5%, 70%); - - --table-border-color: hsl(60, 9%, 82%); - --table-header-bg: #b3a497; - --table-alternate-bg: hsl(60, 9%, 84%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #dec2a2; - --search-mark-bg: #e69f67; -} diff --git a/docs/theme/favicon.png b/docs/theme/favicon.png deleted file mode 100644 index a5b1aa16c4..0000000000 Binary files a/docs/theme/favicon.png and /dev/null differ diff --git a/docs/theme/highlight.css b/docs/theme/highlight.css deleted file mode 100644 index 90d87d3ad1..0000000000 --- a/docs/theme/highlight.css +++ /dev/null @@ -1,69 +0,0 @@ -/* Base16 Atelier Dune Light - Theme */ -/* by Bram de Haan (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/dune) */ -/* Original Base16 color scheme by Chris Kempson (https://github.com/chriskempson/base16) */ - -/* Atelier-Dune Comment */ -.hljs-comment, -.hljs-quote { - color: #AAA; -} - -/* Atelier-Dune Red */ -.hljs-variable, -.hljs-template-variable, -.hljs-attribute, -.hljs-tag, -.hljs-name, -.hljs-regexp, -.hljs-link, -.hljs-name, -.hljs-selector-id, -.hljs-selector-class { - color: #f92672; -} - -/* Atelier-Dune Orange */ -.hljs-number, -.hljs-meta, -.hljs-built_in, -.hljs-builtin-name, -.hljs-literal, -.hljs-type, -.hljs-params { - color: #f6aa11; -} - -/* Atelier-Dune Green */ -.hljs-string, -.hljs-symbol, -.hljs-bullet { - color: #60ac39; -} - -/* Atelier-Dune Blue */ -.hljs-title, -.hljs-section { - color: #6684e1; -} - -/* Atelier-Dune Purple */ -.hljs-keyword, -.hljs-selector-tag { - 
color: #b854d4; -} - -.hljs { - display: block; - overflow-x: auto; - background: #f1f1f1; - color: #6e6b5e; - padding: 0.5em; -} - -.hljs-emphasis { - font-style: italic; -} - -.hljs-strong { - font-weight: bold; -} diff --git a/docs/theme/highlight.js b/docs/theme/highlight.js deleted file mode 100644 index 2787266881..0000000000 --- a/docs/theme/highlight.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! highlight.js v9.12.0 | BSD3 License | git.io/hljslicense */ -!function(e){var n="object"==typeof window&&window||"object"==typeof self&&self;"undefined"!=typeof exports?e(exports):n&&(n.hljs=e({}),"function"==typeof define&&define.amd&&define([],function(){return n.hljs}))}(function(e){function n(e){return e.replace(/&/g,"&").replace(//g,">")}function t(e){return e.nodeName.toLowerCase()}function r(e,n){var t=e&&e.exec(n);return t&&0===t.index}function a(e){return k.test(e)}function i(e){var n,t,r,i,o=e.className+" ";if(o+=e.parentNode?e.parentNode.className:"",t=B.exec(o))return w(t[1])?t[1]:"no-highlight";for(o=o.split(/\s+/),n=0,r=o.length;r>n;n++)if(i=o[n],a(i)||w(i))return i}function o(e){var n,t={},r=Array.prototype.slice.call(arguments,1);for(n in e)t[n]=e[n];return r.forEach(function(e){for(n in e)t[n]=e[n]}),t}function u(e){var n=[];return function r(e,a){for(var i=e.firstChild;i;i=i.nextSibling)3===i.nodeType?a+=i.nodeValue.length:1===i.nodeType&&(n.push({event:"start",offset:a,node:i}),a=r(i,a),t(i).match(/br|hr|img|input/)||n.push({event:"stop",offset:a,node:i}));return a}(e,0),n}function c(e,r,a){function i(){return e.length&&r.length?e[0].offset!==r[0].offset?e[0].offset"}function u(e){s+=""}function c(e){("start"===e.event?o:u)(e.node)}for(var l=0,s="",f=[];e.length||r.length;){var g=i();if(s+=n(a.substring(l,g[0].offset)),l=g[0].offset,g===e){f.reverse().forEach(u);do c(g.splice(0,1)[0]),g=i();while(g===e&&g.length&&g[0].offset===l);f.reverse().forEach(o)}else"start"===g[0].event?f.push(g[0].node):f.pop(),c(g.splice(0,1)[0])}return s+n(a.substr(l))}function l(e){return e.v&&!e.cached_variants&&(e.cached_variants=e.v.map(function(n){return o(e,{v:null},n)})),e.cached_variants||e.eW&&[o(e)]||[e]}function s(e){function n(e){return e&&e.source||e}function t(t,r){return new RegExp(n(t),"m"+(e.cI?"i":"")+(r?"g":""))}function r(a,i){if(!a.compiled){if(a.compiled=!0,a.k=a.k||a.bK,a.k){var o={},u=function(n,t){e.cI&&(t=t.toLowerCase()),t.split(" ").forEach(function(e){var t=e.split("|");o[t[0]]=[n,t[1]?Number(t[1]):1]})};"string"==typeof a.k?u("keyword",a.k):x(a.k).forEach(function(e){u(e,a.k[e])}),a.k=o}a.lR=t(a.l||/\w+/,!0),i&&(a.bK&&(a.b="\\b("+a.bK.split(" ").join("|")+")\\b"),a.b||(a.b=/\B|\b/),a.bR=t(a.b),a.e||a.eW||(a.e=/\B|\b/),a.e&&(a.eR=t(a.e)),a.tE=n(a.e)||"",a.eW&&i.tE&&(a.tE+=(a.e?"|":"")+i.tE)),a.i&&(a.iR=t(a.i)),null==a.r&&(a.r=1),a.c||(a.c=[]),a.c=Array.prototype.concat.apply([],a.c.map(function(e){return l("self"===e?a:e)})),a.c.forEach(function(e){r(e,a)}),a.starts&&r(a.starts,i);var c=a.c.map(function(e){return e.bK?"\\.?("+e.b+")\\.?":e.b}).concat([a.tE,a.i]).map(n).filter(Boolean);a.t=c.length?t(c.join("|"),!0):{exec:function(){return null}}}}r(e)}function f(e,t,a,i){function o(e,n){var t,a;for(t=0,a=n.c.length;a>t;t++)if(r(n.c[t].bR,e))return n.c[t]}function u(e,n){if(r(e.eR,n)){for(;e.endsParent&&e.parent;)e=e.parent;return e}return e.eW?u(e.parent,n):void 0}function c(e,n){return!a&&r(n.iR,e)}function l(e,n){var t=N.cI?n[0].toLowerCase():n[0];return e.k.hasOwnProperty(t)&&e.k[t]}function p(e,n,t,r){var a=r?"":I.classPrefix,i='',i+n+o}function h(){var 
e,t,r,a;if(!E.k)return n(k);for(a="",t=0,E.lR.lastIndex=0,r=E.lR.exec(k);r;)a+=n(k.substring(t,r.index)),e=l(E,r),e?(B+=e[1],a+=p(e[0],n(r[0]))):a+=n(r[0]),t=E.lR.lastIndex,r=E.lR.exec(k);return a+n(k.substr(t))}function d(){var e="string"==typeof E.sL;if(e&&!y[E.sL])return n(k);var t=e?f(E.sL,k,!0,x[E.sL]):g(k,E.sL.length?E.sL:void 0);return E.r>0&&(B+=t.r),e&&(x[E.sL]=t.top),p(t.language,t.value,!1,!0)}function b(){L+=null!=E.sL?d():h(),k=""}function v(e){L+=e.cN?p(e.cN,"",!0):"",E=Object.create(e,{parent:{value:E}})}function m(e,n){if(k+=e,null==n)return b(),0;var t=o(n,E);if(t)return t.skip?k+=n:(t.eB&&(k+=n),b(),t.rB||t.eB||(k=n)),v(t,n),t.rB?0:n.length;var r=u(E,n);if(r){var a=E;a.skip?k+=n:(a.rE||a.eE||(k+=n),b(),a.eE&&(k=n));do E.cN&&(L+=C),E.skip||(B+=E.r),E=E.parent;while(E!==r.parent);return r.starts&&v(r.starts,""),a.rE?0:n.length}if(c(n,E))throw new Error('Illegal lexeme "'+n+'" for mode "'+(E.cN||"")+'"');return k+=n,n.length||1}var N=w(e);if(!N)throw new Error('Unknown language: "'+e+'"');s(N);var R,E=i||N,x={},L="";for(R=E;R!==N;R=R.parent)R.cN&&(L=p(R.cN,"",!0)+L);var k="",B=0;try{for(var M,j,O=0;;){if(E.t.lastIndex=O,M=E.t.exec(t),!M)break;j=m(t.substring(O,M.index),M[0]),O=M.index+j}for(m(t.substr(O)),R=E;R.parent;R=R.parent)R.cN&&(L+=C);return{r:B,value:L,language:e,top:E}}catch(T){if(T.message&&-1!==T.message.indexOf("Illegal"))return{r:0,value:n(t)};throw T}}function g(e,t){t=t||I.languages||x(y);var r={r:0,value:n(e)},a=r;return t.filter(w).forEach(function(n){var t=f(n,e,!1);t.language=n,t.r>a.r&&(a=t),t.r>r.r&&(a=r,r=t)}),a.language&&(r.second_best=a),r}function p(e){return I.tabReplace||I.useBR?e.replace(M,function(e,n){return I.useBR&&"\n"===e?"
":I.tabReplace?n.replace(/\t/g,I.tabReplace):""}):e}function h(e,n,t){var r=n?L[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),-1===e.indexOf(r)&&a.push(r),a.join(" ").trim()}function d(e){var n,t,r,o,l,s=i(e);a(s)||(I.useBR?(n=document.createElementNS("http://www.w3.org/1999/xhtml","div"),n.innerHTML=e.innerHTML.replace(/\n/g,"").replace(//g,"\n")):n=e,l=n.textContent,r=s?f(s,l,!0):g(l),t=u(n),t.length&&(o=document.createElementNS("http://www.w3.org/1999/xhtml","div"),o.innerHTML=r.value,r.value=c(t,u(o),l)),r.value=p(r.value),e.innerHTML=r.value,e.className=h(e.className,s,r.language),e.result={language:r.language,re:r.r},r.second_best&&(e.second_best={language:r.second_best.language,re:r.second_best.r}))}function b(e){I=o(I,e)}function v(){if(!v.called){v.called=!0;var e=document.querySelectorAll("pre code");E.forEach.call(e,d)}}function m(){addEventListener("DOMContentLoaded",v,!1),addEventListener("load",v,!1)}function N(n,t){var r=y[n]=t(e);r.aliases&&r.aliases.forEach(function(e){L[e]=n})}function R(){return x(y)}function w(e){return e=(e||"").toLowerCase(),y[e]||y[L[e]]}var E=[],x=Object.keys,y={},L={},k=/^(no-?highlight|plain|text)$/i,B=/\blang(?:uage)?-([\w-]+)\b/i,M=/((^(<[^>]+>|\t|)+|(?:\n)))/gm,C="
",I={classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:void 0};return e.highlight=f,e.highlightAuto=g,e.fixMarkup=p,e.highlightBlock=d,e.configure=b,e.initHighlighting=v,e.initHighlightingOnLoad=m,e.registerLanguage=N,e.listLanguages=R,e.getLanguage=w,e.inherit=o,e.IR="[a-zA-Z]\\w*",e.UIR="[a-zA-Z_]\\w*",e.NR="\\b\\d+(\\.\\d+)?",e.CNR="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",e.BNR="\\b(0b[01]+)",e.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",e.BE={b:"\\\\[\\s\\S]",r:0},e.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[e.BE]},e.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[e.BE]},e.PWM={b:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},e.C=function(n,t,r){var a=e.inherit({cN:"comment",b:n,e:t,c:[]},r||{});return a.c.push(e.PWM),a.c.push({cN:"doctag",b:"(?:TODO|FIXME|NOTE|BUG|XXX):",r:0}),a},e.CLCM=e.C("//","$"),e.CBCM=e.C("/\\*","\\*/"),e.HCM=e.C("#","$"),e.NM={cN:"number",b:e.NR,r:0},e.CNM={cN:"number",b:e.CNR,r:0},e.BNM={cN:"number",b:e.BNR,r:0},e.CSSNM={cN:"number",b:e.NR+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",r:0},e.RM={cN:"regexp",b:/\//,e:/\/[gimuy]*/,i:/\n/,c:[e.BE,{b:/\[/,e:/\]/,r:0,c:[e.BE]}]},e.TM={cN:"title",b:e.IR,r:0},e.UTM={cN:"title",b:e.UIR,r:0},e.METHOD_GUARD={b:"\\.\\s*"+e.UIR,r:0},e});hljs.registerLanguage("diff",function(e){return{aliases:["patch"],c:[{cN:"meta",r:10,v:[{b:/^@@ +\-\d+,\d+ +\+\d+,\d+ +@@$/},{b:/^\*\*\* +\d+,\d+ +\*\*\*\*$/},{b:/^\-\-\- +\d+,\d+ +\-\-\-\-$/}]},{cN:"comment",v:[{b:/Index: /,e:/$/},{b:/={3,}/,e:/$/},{b:/^\-{3}/,e:/$/},{b:/^\*{3} /,e:/$/},{b:/^\+{3}/,e:/$/},{b:/\*{5}/,e:/\*{5}$/}]},{cN:"addition",b:"^\\+",e:"$"},{cN:"deletion",b:"^\\-",e:"$"},{cN:"addition",b:"^\\!",e:"$"}]}});hljs.registerLanguage("nginx",function(e){var r={cN:"variable",v:[{b:/\$\d+/},{b:/\$\{/,e:/}/},{b:"[\\$\\@]"+e.UIR}]},b={eW:!0,l:"[a-z/_]+",k:{literal:"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll /dev/poll"},r:0,i:"=>",c:[e.HCM,{cN:"string",c:[e.BE,r],v:[{b:/"/,e:/"/},{b:/'/,e:/'/}]},{b:"([a-z]+):/",e:"\\s",eW:!0,eE:!0,c:[r]},{cN:"regexp",c:[e.BE,r],v:[{b:"\\s\\^",e:"\\s|{|;",rE:!0},{b:"~\\*?\\s+",e:"\\s|{|;",rE:!0},{b:"\\*(\\.[a-z\\-]+)+"},{b:"([a-z\\-]+\\.)+\\*"}]},{cN:"number",b:"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?\\b"},{cN:"number",b:"\\b\\d+[kKmMgGdshdwy]*\\b",r:0},r]};return{aliases:["nginxconf"],c:[e.HCM,{b:e.UIR+"\\s+{",rB:!0,e:"{",c:[{cN:"section",b:e.UIR}],r:0},{b:e.UIR+"\\s",e:";|{",rB:!0,c:[{cN:"attribute",b:e.UIR,starts:b}],r:0}],i:"[^\\s\\}]"}});hljs.registerLanguage("objectivec",function(e){var t={cN:"built_in",b:"\\b(AV|CA|CF|CG|CI|CL|CM|CN|CT|MK|MP|MTK|MTL|NS|SCN|SK|UI|WK|XC)\\w+"},_={keyword:"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required @encode 
@package @import @defs @compatibility_alias __bridge __bridge_transfer __bridge_retained __bridge_retain __covariant __contravariant __kindof _Nonnull _Nullable _Null_unspecified __FUNCTION__ __PRETTY_FUNCTION__ __attribute__ getter setter retain unsafe_unretained nonnull nullable null_unspecified null_resettable class instancetype NS_DESIGNATED_INITIALIZER NS_UNAVAILABLE NS_REQUIRES_SUPER NS_RETURNS_INNER_POINTER NS_INLINE NS_AVAILABLE NS_DEPRECATED NS_ENUM NS_OPTIONS NS_SWIFT_UNAVAILABLE NS_ASSUME_NONNULL_BEGIN NS_ASSUME_NONNULL_END NS_REFINED_FOR_SWIFT NS_SWIFT_NAME NS_SWIFT_NOTHROW NS_DURING NS_HANDLER NS_ENDHANDLER NS_VALUERETURN NS_VOIDRETURN",literal:"false true FALSE TRUE nil YES NO NULL",built_in:"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once"},i=/[a-zA-Z@][a-zA-Z0-9_]*/,n="@interface @class @protocol @implementation";return{aliases:["mm","objc","obj-c"],k:_,l:i,i:""}]}]},{cN:"class",b:"("+n.split(" ").join("|")+")\\b",e:"({|$)",eE:!0,k:n,l:i,c:[e.UTM]},{b:"\\."+e.UIR,r:0}]}});hljs.registerLanguage("xml",function(s){var e="[A-Za-z0-9\\._:-]+",t={eW:!0,i:/`]+/}]}]}]};return{aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist"],cI:!0,c:[{cN:"meta",b:"",r:10,c:[{b:"\\[",e:"\\]"}]},s.C("",{r:10}),{b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{b:/<\?(php)?/,e:/\?>/,sL:"php",c:[{b:"/\\*",e:"\\*/",skip:!0}]},{cN:"tag",b:"|$)",e:">",k:{name:"style"},c:[t],starts:{e:"",rE:!0,sL:["css","xml"]}},{cN:"tag",b:"|$)",e:">",k:{name:"script"},c:[t],starts:{e:"",rE:!0,sL:["actionscript","javascript","handlebars","xml"]}},{cN:"meta",v:[{b:/<\?xml/,e:/\?>/,r:10},{b:/<\?\w+/,e:/\?>/}]},{cN:"tag",b:"",c:[{cN:"name",b:/[^\/><\s]+/,r:0},t]}]}});hljs.registerLanguage("handlebars",function(e){var a={"builtin-name":"each in with if else unless bindattr action collection debugger log outlet template unbound view yield"};return{aliases:["hbs","html.hbs","html.handlebars"],cI:!0,sL:"xml",c:[e.C("{{!(--)?","(--)?}}"),{cN:"template-tag",b:/\{\{[#\/]/,e:/\}\}/,c:[{cN:"name",b:/[a-zA-Z\.-]+/,k:a,starts:{eW:!0,r:0,c:[e.QSM]}}]},{cN:"template-variable",b:/\{\{/,e:/\}\}/,k:a}]}});hljs.registerLanguage("ini",function(e){var b={cN:"string",c:[e.BE],v:[{b:"'''",e:"'''",r:10},{b:'"""',e:'"""',r:10},{b:'"',e:'"'},{b:"'",e:"'"}]};return{aliases:["toml"],cI:!0,i:/\S/,c:[e.C(";","$"),e.HCM,{cN:"section",b:/^\s*\[+/,e:/\]+/},{b:/^[a-z0-9\[\]_-]+\s*=\s*/,e:"$",rB:!0,c:[{cN:"attr",b:/[a-z0-9\[\]_-]+/},{b:/=/,eW:!0,r:0,c:[{cN:"literal",b:/\bon|off|true|false|yes|no\b/},{cN:"variable",v:[{b:/\$[\w\d"][\w\d_]*/},{b:/\$\{(.*?)}/}]},b,{cN:"number",b:/([\+\-]+)?[\d]+_[\d_]+/},e.NM]}]}]}});hljs.registerLanguage("javascript",function(e){var r="[A-Za-z$_][0-9A-Za-z$_]*",t={keyword:"in of if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const export super debugger as async await static import from as",literal:"true false null undefined NaN Infinity",built_in:"eval isFinite isNaN parseFloat parseInt decodeURI decodeURIComponent encodeURI encodeURIComponent escape unescape Object Function Boolean Error EvalError InternalError RangeError ReferenceError StopIteration SyntaxError TypeError URIError Number Math Date String RegExp Array Float32Array Float64Array Int16Array Int32Array Int8Array Uint16Array Uint32Array Uint8Array Uint8ClampedArray ArrayBuffer DataView JSON Intl arguments require module console window document Symbol Set Map WeakSet WeakMap Proxy Reflect 
Promise"},a={cN:"number",v:[{b:"\\b(0[bB][01]+)"},{b:"\\b(0[oO][0-7]+)"},{b:e.CNR}],r:0},n={cN:"subst",b:"\\$\\{",e:"\\}",k:t,c:[]},c={cN:"string",b:"`",e:"`",c:[e.BE,n]};n.c=[e.ASM,e.QSM,c,a,e.RM];var s=n.c.concat([e.CBCM,e.CLCM]);return{aliases:["js","jsx"],k:t,c:[{cN:"meta",r:10,b:/^\s*['"]use (strict|asm)['"]/},{cN:"meta",b:/^#!/,e:/$/},e.ASM,e.QSM,c,e.CLCM,e.CBCM,a,{b:/[{,]\s*/,r:0,c:[{b:r+"\\s*:",rB:!0,r:0,c:[{cN:"attr",b:r,r:0}]}]},{b:"("+e.RSR+"|\\b(case|return|throw)\\b)\\s*",k:"return throw case",c:[e.CLCM,e.CBCM,e.RM,{cN:"function",b:"(\\(.*?\\)|"+r+")\\s*=>",rB:!0,e:"\\s*=>",c:[{cN:"params",v:[{b:r},{b:/\(\s*\)/},{b:/\(/,e:/\)/,eB:!0,eE:!0,k:t,c:s}]}]},{b://,sL:"xml",c:[{b:/<\w+\s*\/>/,skip:!0},{b:/<\w+/,e:/(\/\w+|\w+\/)>/,skip:!0,c:[{b:/<\w+\s*\/>/,skip:!0},"self"]}]}],r:0},{cN:"function",bK:"function",e:/\{/,eE:!0,c:[e.inherit(e.TM,{b:r}),{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,c:s}],i:/\[|%/},{b:/\$[(.]/},e.METHOD_GUARD,{cN:"class",bK:"class",e:/[{;=]/,eE:!0,i:/[:"\[\]]/,c:[{bK:"extends"},e.UTM]},{bK:"constructor",e:/\{/,eE:!0}],i:/#(?!!)/}});hljs.registerLanguage("python",function(e){var r={keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10 None True False",built_in:"Ellipsis NotImplemented"},b={cN:"meta",b:/^(>>>|\.\.\.) /},c={cN:"subst",b:/\{/,e:/\}/,k:r,i:/#/},a={cN:"string",c:[e.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[b],r:10},{b:/(u|b)?r?"""/,e:/"""/,c:[b],r:10},{b:/(fr|rf|f)'''/,e:/'''/,c:[b,c]},{b:/(fr|rf|f)"""/,e:/"""/,c:[b,c]},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)"/,e:/"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)"/,e:/"/},{b:/(fr|rf|f)'/,e:/'/,c:[c]},{b:/(fr|rf|f)"/,e:/"/,c:[c]},e.ASM,e.QSM]},s={cN:"number",r:0,v:[{b:e.BNR+"[lLjJ]?"},{b:"\\b(0o[0-7]+)[lLjJ]?"},{b:e.CNR+"[lLjJ]?"}]},i={cN:"params",b:/\(/,e:/\)/,c:["self",b,s,a]};return c.c=[a,s,b],{aliases:["py","gyp"],k:r,i:/(<\/|->|\?)|=>/,c:[b,s,a,e.HCM,{v:[{cN:"function",bK:"def"},{cN:"class",bK:"class"}],e:/:/,i:/[${=;\n,]/,c:[e.UTM,i,{b:/->/,eW:!0,k:"None"}]},{cN:"meta",b:/^[\t ]*@/,e:/$/},{b:/\b(print|exec)\(/}]}});hljs.registerLanguage("markdown",function(e){return{aliases:["md","mkdown","mkd"],c:[{cN:"section",v:[{b:"^#{1,6}",e:"$"},{b:"^.+?\\n[=-]{2,}$"}]},{b:"<",e:">",sL:"xml",r:0},{cN:"bullet",b:"^([*+-]|(\\d+\\.))\\s+"},{cN:"strong",b:"[*_]{2}.+?[*_]{2}"},{cN:"emphasis",v:[{b:"\\*.+?\\*"},{b:"_.+?_",r:0}]},{cN:"quote",b:"^>\\s+",e:"$"},{cN:"code",v:[{b:"^```w*s*$",e:"^```s*$"},{b:"`.+?`"},{b:"^( {4}| )",e:"$",r:0}]},{b:"^[-\\*]{3,}",e:"$"},{b:"\\[.+?\\][\\(\\[].*?[\\)\\]]",rB:!0,c:[{cN:"string",b:"\\[",e:"\\]",eB:!0,rE:!0,r:0},{cN:"link",b:"\\]\\(",e:"\\)",eB:!0,eE:!0},{cN:"symbol",b:"\\]\\[",e:"\\]",eB:!0,eE:!0}],r:10},{b:/^\[[^\n]+\]:/,rB:!0,c:[{cN:"symbol",b:/\[/,e:/\]/,eB:!0,eE:!0},{cN:"link",b:/:\s*/,e:/$/,eB:!0}]}]}});hljs.registerLanguage("php",function(e){var c={b:"\\$+[a-zA-Z_-ÿ][a-zA-Z0-9_-ÿ]*"},i={cN:"meta",b:/<\?(php)?|\?>/},t={cN:"string",c:[e.BE,i],v:[{b:'b"',e:'"'},{b:"b'",e:"'"},e.inherit(e.ASM,{i:null}),e.inherit(e.QSM,{i:null})]},a={v:[e.BNM,e.CNM]};return{aliases:["php3","php4","php5","php6"],cI:!0,k:"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception default die require 
__FUNCTION__ enddeclare final try switch continue endfor endif declare unset true false trait goto instanceof insteadof __DIR__ __NAMESPACE__ yield finally",c:[e.HCM,e.C("//","$",{c:[i]}),e.C("/\\*","\\*/",{c:[{cN:"doctag",b:"@[A-Za-z]+"}]}),e.C("__halt_compiler.+?;",!1,{eW:!0,k:"__halt_compiler",l:e.UIR}),{cN:"string",b:/<<<['"]?\w+['"]?$/,e:/^\w+;?$/,c:[e.BE,{cN:"subst",v:[{b:/\$\w+/},{b:/\{\$/,e:/\}/}]}]},i,{cN:"keyword",b:/\$this\b/},c,{b:/(::|->)+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/},{cN:"function",bK:"function",e:/[;{]/,eE:!0,i:"\\$|\\[|%",c:[e.UTM,{cN:"params",b:"\\(",e:"\\)",c:["self",c,e.CBCM,t,a]}]},{cN:"class",bK:"class interface",e:"{",eE:!0,i:/[:\(\$"]/,c:[{bK:"extends implements"},e.UTM]},{bK:"namespace",e:";",i:/[\.']/,c:[e.UTM]},{bK:"use",e:";",c:[e.UTM]},{b:"=>"},t,a]}});hljs.registerLanguage("d",function(e){var t={keyword:"abstract alias align asm assert auto body break byte case cast catch class const continue debug default delete deprecated do else enum export extern final finally for foreach foreach_reverse|10 goto if immutable import in inout int interface invariant is lazy macro mixin module new nothrow out override package pragma private protected public pure ref return scope shared static struct super switch synchronized template this throw try typedef typeid typeof union unittest version void volatile while with __FILE__ __LINE__ __gshared|10 __thread __traits __DATE__ __EOF__ __TIME__ __TIMESTAMP__ __VENDOR__ __VERSION__",built_in:"bool cdouble cent cfloat char creal dchar delegate double dstring float function idouble ifloat ireal long real short string ubyte ucent uint ulong ushort wchar wstring",literal:"false null true"},r="(0|[1-9][\\d_]*)",a="(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)",i="0[bB][01_]+",n="([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*)",_="0[xX]"+n,c="([eE][+-]?"+a+")",d="("+a+"(\\.\\d*|"+c+")|\\d+\\."+a+a+"|\\."+r+c+"?)",o="(0[xX]("+n+"\\."+n+"|\\.?"+n+")[pP][+-]?"+a+")",s="("+r+"|"+i+"|"+_+")",l="("+o+"|"+d+")",u="\\\\(['\"\\?\\\\abfnrtv]|u[\\dA-Fa-f]{4}|[0-7]{1,3}|x[\\dA-Fa-f]{2}|U[\\dA-Fa-f]{8})|&[a-zA-Z\\d]{2,};",b={cN:"number",b:"\\b"+s+"(L|u|U|Lu|LU|uL|UL)?",r:0},f={cN:"number",b:"\\b("+l+"([fF]|L|i|[fF]i|Li)?|"+s+"(i|[fF]i|Li))",r:0},g={cN:"string",b:"'("+u+"|.)",e:"'",i:"."},h={b:u,r:0},p={cN:"string",b:'"',c:[h],e:'"[cwd]?'},m={cN:"string",b:'[rq]"',e:'"[cwd]?',r:5},w={cN:"string",b:"`",e:"`[cwd]?"},N={cN:"string",b:'x"[\\da-fA-F\\s\\n\\r]*"[cwd]?',r:10},A={cN:"string",b:'q"\\{',e:'\\}"'},F={cN:"meta",b:"^#!",e:"$",r:5},y={cN:"meta",b:"#(line)",e:"$",r:5},L={cN:"keyword",b:"@[a-zA-Z_][a-zA-Z_\\d]*"},v=e.C("\\/\\+","\\+\\/",{c:["self"],r:10});return{l:e.UIR,k:t,c:[e.CLCM,e.CBCM,v,N,p,m,w,A,f,b,g,F,y,L]}});hljs.registerLanguage("json",function(e){var i={literal:"true false null"},n=[e.QSM,e.CNM],r={e:",",eW:!0,eE:!0,c:n,k:i},t={b:"{",e:"}",c:[{cN:"attr",b:/"/,e:/"/,c:[e.BE],i:"\\n"},e.inherit(r,{b:/:/})],i:"\\S"},c={b:"\\[",e:"\\]",c:[e.inherit(r)],i:"\\S"};return n.splice(n.length,0,t,c),{c:n,k:i,i:"\\S"}});hljs.registerLanguage("go",function(e){var t={keyword:"break default func interface select case map struct chan else goto package switch const fallthrough if range type continue for import return var go defer bool byte complex64 complex128 float32 float64 int8 int16 int32 int64 string uint8 uint16 uint32 uint64 int uint uintptr rune",literal:"true false iota nil",built_in:"append cap close complex copy imag len make new panic print println real recover 
delete"};return{aliases:["golang"],k:t,i:"{",e:"}"},n={v:[{b:/\$\d/},{b:/[\$%@](\^\w\b|#\w+(::\w+)*|{\w+}|\w+(::\w*)*)/},{b:/[\$%@][^\s\w{]/,r:0}]},i=[e.BE,r,n],o=[n,e.HCM,e.C("^\\=\\w","\\=cut",{eW:!0}),s,{cN:"string",c:i,v:[{b:"q[qwxr]?\\s*\\(",e:"\\)",r:5},{b:"q[qwxr]?\\s*\\[",e:"\\]",r:5},{b:"q[qwxr]?\\s*\\{",e:"\\}",r:5},{b:"q[qwxr]?\\s*\\|",e:"\\|",r:5},{b:"q[qwxr]?\\s*\\<",e:"\\>",r:5},{b:"qw\\s+q",e:"q",r:5},{b:"'",e:"'",c:[e.BE]},{b:'"',e:'"'},{b:"`",e:"`",c:[e.BE]},{b:"{\\w+}",c:[],r:0},{b:"-?\\w+\\s*\\=\\>",c:[],r:0}]},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{b:"(\\/\\/|"+e.RSR+"|\\b(split|return|print|reverse|grep)\\b)\\s*",k:"split return print reverse grep",r:0,c:[e.HCM,{cN:"regexp",b:"(s|tr|y)/(\\\\.|[^/])*/(\\\\.|[^/])*/[a-z]*",r:10},{cN:"regexp",b:"(m|qr)?/",e:"/[a-z]*",c:[e.BE],r:0}]},{cN:"function",bK:"sub",e:"(\\s*\\(.*?\\))?[;{]",eE:!0,r:5,c:[e.TM]},{b:"-\\w\\b",r:0},{b:"^__DATA__$",e:"^__END__$",sL:"mojolicious",c:[{b:"^@@.*",e:"$",cN:"comment"}]}];return r.c=o,s.c=o,{aliases:["pl","pm"],l:/[\w\.]+/,k:t,c:o}});hljs.registerLanguage("rust",function(e){var t="([ui](8|16|32|64|128|size)|f(32|64))?",r="alignof as be box break const continue crate do else enum extern false fn for if impl in let loop match mod mut offsetof once priv proc pub pure ref return self Self sizeof static struct super trait true type typeof unsafe unsized use virtual while where yield move default",n="drop i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize f32 f64 str char bool Box Option Result String Vec Copy Send Sized Sync Drop Fn FnMut FnOnce ToOwned Clone Debug PartialEq PartialOrd Eq Ord AsRef AsMut Into From Default Iterator Extend IntoIterator DoubleEndedIterator ExactSizeIterator SliceConcatExt ToString assert! assert_eq! bitflags! bytes! cfg! col! concat! concat_idents! debug_assert! debug_assert_eq! env! panic! file! format! format_args! include_bin! include_str! line! local_data_key! module_path! option_env! print! println! select! stringify! try! unimplemented! unreachable! vec! write! writeln! macro_rules! assert_ne! 
debug_assert_ne!";return{aliases:["rs"],k:{keyword:r,literal:"true false Some None Ok Err",built_in:n},l:e.IR+"!?",i:""}]}});hljs.registerLanguage("ruby",function(e){var b="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?",r={keyword:"and then defined module in return redo if BEGIN retry end for self when next until do begin unless END rescue else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor",literal:"true false nil"},c={cN:"doctag",b:"@[A-Za-z]+"},a={b:"#<",e:">"},s=[e.C("#","$",{c:[c]}),e.C("^\\=begin","^\\=end",{c:[c],r:10}),e.C("^__END__","\\n$")],n={cN:"subst",b:"#\\{",e:"}",k:r},t={cN:"string",c:[e.BE,n],v:[{b:/'/,e:/'/},{b:/"/,e:/"/},{b:/`/,e:/`/},{b:"%[qQwWx]?\\(",e:"\\)"},{b:"%[qQwWx]?\\[",e:"\\]"},{b:"%[qQwWx]?{",e:"}"},{b:"%[qQwWx]?<",e:">"},{b:"%[qQwWx]?/",e:"/"},{b:"%[qQwWx]?%",e:"%"},{b:"%[qQwWx]?-",e:"-"},{b:"%[qQwWx]?\\|",e:"\\|"},{b:/\B\?(\\\d{1,3}|\\x[A-Fa-f0-9]{1,2}|\\u[A-Fa-f0-9]{4}|\\?\S)\b/},{b:/<<(-?)\w+$/,e:/^\s*\w+$/}]},i={cN:"params",b:"\\(",e:"\\)",endsParent:!0,k:r},d=[t,a,{cN:"class",bK:"class module",e:"$|;",i:/=/,c:[e.inherit(e.TM,{b:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?"}),{b:"<\\s*",c:[{b:"("+e.IR+"::)?"+e.IR}]}].concat(s)},{cN:"function",bK:"def",e:"$|;",c:[e.inherit(e.TM,{b:b}),i].concat(s)},{b:e.IR+"::"},{cN:"symbol",b:e.UIR+"(\\!|\\?)?:",r:0},{cN:"symbol",b:":(?!\\s)",c:[t,{b:b}],r:0},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{b:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{cN:"params",b:/\|/,e:/\|/,k:r},{b:"("+e.RSR+"|unless)\\s*",k:"unless",c:[a,{cN:"regexp",c:[e.BE,n],i:/\n/,v:[{b:"/",e:"/[a-z]*"},{b:"%r{",e:"}[a-z]*"},{b:"%r\\(",e:"\\)[a-z]*"},{b:"%r!",e:"![a-z]*"},{b:"%r\\[",e:"\\][a-z]*"}]}].concat(s),r:0}].concat(s);n.c=d,i.c=d;var l="[>?]>",o="[\\w#]+\\(\\w+\\):\\d+:\\d+>",u="(\\w+-)?\\d+\\.\\d+\\.\\d(p\\d+)?[^>]+>",w=[{b:/^\s*=>/,starts:{e:"$",c:d}},{cN:"meta",b:"^("+l+"|"+o+"|"+u+")",starts:{e:"$",c:d}}];return{aliases:["rb","gemspec","podspec","thor","irb"],k:r,i:/\/\*/,c:s.concat(w).concat(d)}});hljs.registerLanguage("makefile",function(e){var i={cN:"variable",v:[{b:"\\$\\("+e.UIR+"\\)",c:[e.BE]},{b:/\$[@%] *$",rE:!0,c:l.c,e:t.v[0].b},{b:"<%[%=-]?",e:"[%-]?%>",sL:"ruby",eB:!0,eE:!0,r:0},{cN:"type",b:"!!"+e.UIR},{cN:"meta",b:"&"+e.UIR+"$"},{cN:"meta",b:"\\*"+e.UIR+"$"},{cN:"bullet",b:"^ *-",r:0},e.HCM,{bK:b,k:{literal:b}},e.CNM,l]}});hljs.registerLanguage("css",function(e){var c="[a-zA-Z-][a-zA-Z0-9_-]*",t={b:/[A-Z\_\.\-]+\s*:/,rB:!0,e:";",eW:!0,c:[{cN:"attribute",b:/\S/,e:":",eE:!0,starts:{eW:!0,eE:!0,c:[{b:/[\w-]+\(/,rB:!0,c:[{cN:"built_in",b:/[\w-]+/},{b:/\(/,e:/\)/,c:[e.ASM,e.QSM]}]},e.CSSNM,e.QSM,e.ASM,e.CBCM,{cN:"number",b:"#[0-9A-Fa-f]+"},{cN:"meta",b:"!important"}]}}]};return{cI:!0,i:/[=\/|'\$]/,c:[e.CBCM,{cN:"selector-id",b:/#[A-Za-z0-9_-]+/},{cN:"selector-class",b:/\.[A-Za-z0-9_-]+/},{cN:"selector-attr",b:/\[/,e:/\]/,i:"$"},{cN:"selector-pseudo",b:/:(:)?[a-zA-Z0-9\_\-\+\(\)"'.]+/},{b:"@(font-face|page)",l:"[a-z-]+",k:"font-face page"},{b:"@",e:"[{;]",i:/:/,c:[{cN:"keyword",b:/\w+/},{b:/\s/,eW:!0,eE:!0,r:0,c:[e.ASM,e.QSM,e.CSSNM]}]},{cN:"selector-tag",b:c,r:0},{b:"{",e:"}",i:/\S/,c:[e.CBCM,t]}]}});hljs.registerLanguage("java",function(e){var a="[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*",t=a+"(<"+a+"(\\s*,\\s*"+a+")*>)?",r="false synchronized int abstract float private char boolean static null if const for true while long strictfp finally protected import native final void enum else break transient 
catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private module requires exports do",s="\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\d]+[\\d_]+[\\d]+|[\\d]+)(\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))?|\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))([eE][-+]?\\d+)?)[lLfF]?",c={cN:"number",b:s,r:0};return{aliases:["jsp"],k:r,i:/<\/|#/,c:[e.C("/\\*\\*","\\*/",{r:0,c:[{b:/\w+@/,r:0},{cN:"doctag",b:"@[A-Za-z]+"}]}),e.CLCM,e.CBCM,e.ASM,e.QSM,{cN:"class",bK:"class interface",e:/[{;=]/,eE:!0,k:"class interface",i:/[:"\[\]]/,c:[{bK:"extends implements"},e.UTM]},{bK:"new throw return else",r:0},{cN:"function",b:"("+t+"\\s+)+"+e.UIR+"\\s*\\(",rB:!0,e:/[{;=]/,eE:!0,k:r,c:[{b:e.UIR+"\\s*\\(",rB:!0,r:0,c:[e.UTM]},{cN:"params",b:/\(/,e:/\)/,k:r,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]},c,{cN:"meta",b:"@[A-Za-z]+"}]}});hljs.registerLanguage("armasm",function(s){return{cI:!0,aliases:["arm"],l:"\\.?"+s.IR,k:{meta:".2byte .4byte .align .ascii .asciz .balign .byte .code .data .else .end .endif .endm .endr .equ .err .exitm .extern .global .hword .if .ifdef .ifndef .include .irp .long .macro .rept .req .section .set .skip .space .text .word .arm .thumb .code16 .code32 .force_thumb .thumb_func .ltorg ALIAS ALIGN ARM AREA ASSERT ATTR CN CODE CODE16 CODE32 COMMON CP DATA DCB DCD DCDU DCDO DCFD DCFDU DCI DCQ DCQU DCW DCWU DN ELIF ELSE END ENDFUNC ENDIF ENDP ENTRY EQU EXPORT EXPORTAS EXTERN FIELD FILL FUNCTION GBLA GBLL GBLS GET GLOBAL IF IMPORT INCBIN INCLUDE INFO KEEP LCLA LCLL LCLS LTORG MACRO MAP MEND MEXIT NOFP OPT PRESERVE8 PROC QN READONLY RELOC REQUIRE REQUIRE8 RLIST FN ROUT SETA SETL SETS SN SPACE SUBT THUMB THUMBX TTL WHILE WEND ",built_in:"r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 pc lr sp ip sl sb fp a1 a2 a3 a4 v1 v2 v3 v4 v5 v6 v7 v8 f0 f1 f2 f3 f4 f5 f6 f7 p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 cpsr_c cpsr_x cpsr_s cpsr_f cpsr_cx cpsr_cxs cpsr_xs cpsr_xsf cpsr_sf cpsr_cxsf spsr_c spsr_x spsr_s spsr_f spsr_cx spsr_cxs spsr_xs spsr_xsf spsr_sf spsr_cxsf s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 {PC} {VAR} {TRUE} {FALSE} {OPT} {CONFIG} {ENDIAN} {CODESIZE} {CPU} {FPU} {ARCHITECTURE} {PCSTOREOFFSET} {ARMASM_VERSION} {INTER} {ROPI} {RWPI} {SWST} {NOSWST} . 
@"},c:[{cN:"keyword",b:"\\b(adc|(qd?|sh?|u[qh]?)?add(8|16)?|usada?8|(q|sh?|u[qh]?)?(as|sa)x|and|adrl?|sbc|rs[bc]|asr|b[lx]?|blx|bxj|cbn?z|tb[bh]|bic|bfc|bfi|[su]bfx|bkpt|cdp2?|clz|clrex|cmp|cmn|cpsi[ed]|cps|setend|dbg|dmb|dsb|eor|isb|it[te]{0,3}|lsl|lsr|ror|rrx|ldm(([id][ab])|f[ds])?|ldr((s|ex)?[bhd])?|movt?|mvn|mra|mar|mul|[us]mull|smul[bwt][bt]|smu[as]d|smmul|smmla|mla|umlaal|smlal?([wbt][bt]|d)|mls|smlsl?[ds]|smc|svc|sev|mia([bt]{2}|ph)?|mrr?c2?|mcrr2?|mrs|msr|orr|orn|pkh(tb|bt)|rbit|rev(16|sh)?|sel|[su]sat(16)?|nop|pop|push|rfe([id][ab])?|stm([id][ab])?|str(ex)?[bhd]?|(qd?)?sub|(sh?|q|u[qh]?)?sub(8|16)|[su]xt(a?h|a?b(16)?)|srs([id][ab])?|swpb?|swi|smi|tst|teq|wfe|wfi|yield)(eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|hs|lo)?[sptrx]?",e:"\\s"},s.C("[;@]","$",{r:0}),s.CBCM,s.QSM,{cN:"string",b:"'",e:"[^\\\\]'",r:0},{cN:"title",b:"\\|",e:"\\|",i:"\\n",r:0},{cN:"number",v:[{b:"[#$=]?0x[0-9a-f]+"},{b:"[#$=]?0b[01]+"},{b:"[#$=]\\d+"},{b:"\\b\\d+"}],r:0},{cN:"symbol",v:[{b:"^[a-z_\\.\\$][a-z0-9_\\.\\$]+"},{b:"^\\s*[a-z_\\.\\$][a-z0-9_\\.\\$]+:"},{b:"[=#]\\w+"}],r:0}]}});hljs.registerLanguage("swift",function(e){var i={keyword:"__COLUMN__ __FILE__ __FUNCTION__ __LINE__ as as! as? associativity break case catch class continue convenience default defer deinit didSet do dynamic dynamicType else enum extension fallthrough false fileprivate final for func get guard if import in indirect infix init inout internal is lazy left let mutating nil none nonmutating open operator optional override postfix precedence prefix private protocol Protocol public repeat required rethrows return right self Self set static struct subscript super switch throw throws true try try! try? Type typealias unowned var weak where while willSet",literal:"true false nil",built_in:"abs advance alignof alignofValue anyGenerator assert assertionFailure bridgeFromObjectiveC bridgeFromObjectiveCUnconditional bridgeToObjectiveC bridgeToObjectiveCUnconditional c contains count countElements countLeadingZeros debugPrint debugPrintln distance dropFirst dropLast dump encodeBitsAsWords enumerate equal fatalError filter find getBridgedObjectiveCType getVaList indices insertionSort isBridgedToObjectiveC isBridgedVerbatimToObjectiveC isUniquelyReferenced isUniquelyReferencedNonObjC join lazy lexicographicalCompare map max maxElement min minElement numericCast overlaps partition posix precondition preconditionFailure print println quickSort readLine reduce reflect reinterpretCast reverse roundUpToAlignment sizeof sizeofValue sort split startsWith stride strideof strideofValue swap toString transcode underestimateCount unsafeAddressOf unsafeBitCast unsafeDowncast unsafeUnwrap unsafeReflect withExtendedLifetime withObjectAtPlusZero withUnsafePointer withUnsafePointerToObject withUnsafeMutablePointer withUnsafeMutablePointers withUnsafePointer withUnsafePointers withVaList zip"},t={cN:"type",b:"\\b[A-Z][\\wÀ-ʸ']*",r:0},n=e.C("/\\*","\\*/",{c:["self"]}),r={cN:"subst",b:/\\\(/,e:"\\)",k:i,c:[]},a={cN:"number",b:"\\b([\\d_]+(\\.[\\deE_]+)?|0x[a-fA-F0-9_]+(\\.[a-fA-F0-9p_]+)?|0b[01_]+|0o[0-7_]+)\\b",r:0},o=e.inherit(e.QSM,{c:[r,e.BE]});return r.c=[a],{k:i,c:[o,e.CLCM,n,t,a,{cN:"function",bK:"func",e:"{",eE:!0,c:[e.inherit(e.TM,{b:/[A-Za-z$_][0-9A-Za-z$_]*/}),{b://},{cN:"params",b:/\(/,e:/\)/,endsParent:!0,k:i,c:["self",a,o,e.CBCM,{b:":"}],i:/["']/}],i:/\[|%/},{cN:"class",bK:"struct protocol class extension 
enum",k:i,e:"\\{",eE:!0,c:[e.inherit(e.TM,{b:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/})]},{cN:"meta",b:"(@warn_unused_result|@exported|@lazy|@noescape|@NSCopying|@NSManaged|@objc|@convention|@required|@noreturn|@IBAction|@IBDesignable|@IBInspectable|@IBOutlet|@infix|@prefix|@postfix|@autoclosure|@testable|@available|@nonobjc|@NSApplicationMain|@UIApplicationMain)"},{bK:"import",e:/$/,c:[e.CLCM,n]}]}});hljs.registerLanguage("cpp",function(t){var e={cN:"keyword",b:"\\b[a-z\\d_]*_t\\b"},r={cN:"string",v:[{b:'(u8?|U)?L?"',e:'"',i:"\\n",c:[t.BE]},{b:'(u8?|U)?R"',e:'"',c:[t.BE]},{b:"'\\\\?.",e:"'",i:"."}]},s={cN:"number",v:[{b:"\\b(0b[01']+)"},{b:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{b:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"}],r:0},i={cN:"meta",b:/#\s*[a-z]+\b/,e:/$/,k:{"meta-keyword":"if else elif endif define undef warning error line pragma ifdef ifndef include"},c:[{b:/\\\n/,r:0},t.inherit(r,{cN:"meta-string"}),{cN:"meta-string",b:/<[^\n>]*>/,e:/$/,i:"\\n"},t.CLCM,t.CBCM]},a=t.IR+"\\s*\\(",c={keyword:"int float while private char catch import module export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const for static_cast|10 union namespace unsigned long volatile static protected bool template mutable if public friend do goto auto void enum else break extern using asm case typeid short reinterpret_cast|10 default double register explicit signed typename try this switch continue inline delete alignof constexpr decltype noexcept static_assert thread_local restrict _Bool complex _Complex _Imaginary atomic_bool atomic_char atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint atomic_long atomic_ulong atomic_llong atomic_ullong new throw return and or not",built_in:"std string cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap array shared_ptr abort abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr",literal:"true false nullptr NULL"},n=[e,t.CLCM,t.CBCM,s,r];return{aliases:["c","cc","h","c++","h++","hpp"],k:c,i:"",k:c,c:["self",e]},{b:t.IR+"::",k:c},{v:[{b:/=/,e:/;/},{b:/\(/,e:/\)/},{bK:"new throw return else",e:/;/}],k:c,c:n.concat([{b:/\(/,e:/\)/,k:c,c:n.concat(["self"]),r:0}]),r:0},{cN:"function",b:"("+t.IR+"[\\*&\\s]+)+"+a,rB:!0,e:/[{;=]/,eE:!0,k:c,i:/[^\w\s\*&]/,c:[{b:a,rB:!0,c:[t.TM],r:0},{cN:"params",b:/\(/,e:/\)/,k:c,r:0,c:[t.CLCM,t.CBCM,r,s,e]},t.CLCM,t.CBCM,i]},{cN:"class",bK:"class struct",e:/[{;:]/,c:[{b://,c:["self"]},t.TM]}]),exports:{preprocessor:i,strings:r,k:c}}});hljs.registerLanguage("x86asm",function(s){return{cI:!0,l:"[.%]?"+s.IR,k:{keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint 
emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd 
andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss 
vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd 
vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss 
vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ __SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ 
__FLOAT__"},c:[s.C(";","$",{r:0}),{cN:"number",v:[{b:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",r:0},{b:"\\$[0-9][0-9A-Fa-f]*",r:0},{b:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{b:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QSM,{cN:"string",v:[{b:"'",e:"[^\\\\]'"},{b:"`",e:"[^\\\\]`"}],r:0},{cN:"symbol",v:[{b:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{b:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],r:0},{cN:"subst",b:"%[0-9]+",r:0},{cN:"subst",b:"%!S+",r:0},{cN:"meta",b:/^\s*\.[\w_-]+/}]}});hljs.registerLanguage("bash",function(e){var t={cN:"variable",v:[{b:/\$[\w\d#@][\w\d_]*/},{b:/\$\{(.*?)}/}]},s={cN:"string",b:/"/,e:/"/,c:[e.BE,t,{cN:"variable",b:/\$\(/,e:/\)/,c:[e.BE]}]},a={cN:"string",b:/'/,e:/'/};return{aliases:["sh","zsh"],l:/\b-?[a-z\._]+\b/,k:{keyword:"if then else elif fi for while in do done case esac function",literal:"true false",built_in:"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp",_:"-ne -eq -lt -gt -f -d -e -s -l -a"},c:[{cN:"meta",b:/^#![^\n]+sh\s*$/,r:10},{cN:"function",b:/\w[\w\d_]*\s*\(\s*\)\s*\{/,rB:!0,c:[e.inherit(e.TM,{b:/\w[\w\d_]*/})],r:0},e.HCM,s,a,t]}});hljs.registerLanguage("shell",function(s){return{aliases:["console"],c:[{cN:"meta",b:"^\\s{0,3}[\\w\\d\\[\\]()@-]*[>%$#]",starts:{e:"$",sL:"bash"}}]}});hljs.registerLanguage("http",function(e){var t="HTTP/[0-9\\.]+";return{aliases:["https"],i:"\\S",c:[{b:"^"+t,e:"$",c:[{cN:"number",b:"\\b\\d{3}\\b"}]},{b:"^[A-Z]+ (.*?) 
"+t+"$",rB:!0,e:"$",c:[{cN:"string",b:" ",e:" ",eB:!0,eE:!0},{b:t},{cN:"keyword",b:"[A-Z]+"}]},{cN:"attribute",b:"^\\w",e:": ",eE:!0,i:"\\n|\\s|=",starts:{e:"$",r:0}},{b:"\\n\\n",starts:{sL:[],eW:!0}}]}});hljs.registerLanguage("cs",function(e){var i={keyword:"abstract as base bool break byte case catch char checked const continue decimal default delegate do double enum event explicit extern finally fixed float for foreach goto if implicit in int interface internal is lock long nameof object operator out override params private protected public readonly ref sbyte sealed short sizeof stackalloc static string struct switch this try typeof uint ulong unchecked unsafe ushort using virtual void volatile while add alias ascending async await by descending dynamic equals from get global group into join let on orderby partial remove select set value var where yield",literal:"null false true"},t={cN:"string",b:'@"',e:'"',c:[{b:'""'}]},r=e.inherit(t,{i:/\n/}),a={cN:"subst",b:"{",e:"}",k:i},c=e.inherit(a,{i:/\n/}),n={cN:"string",b:/\$"/,e:'"',i:/\n/,c:[{b:"{{"},{b:"}}"},e.BE,c]},s={cN:"string",b:/\$@"/,e:'"',c:[{b:"{{"},{b:"}}"},{b:'""'},a]},o=e.inherit(s,{i:/\n/,c:[{b:"{{"},{b:"}}"},{b:'""'},c]});a.c=[s,n,t,e.ASM,e.QSM,e.CNM,e.CBCM],c.c=[o,n,r,e.ASM,e.QSM,e.CNM,e.inherit(e.CBCM,{i:/\n/})];var l={v:[s,n,t,e.ASM,e.QSM]},b=e.IR+"(<"+e.IR+"(\\s*,\\s*"+e.IR+")*>)?(\\[\\])?";return{aliases:["csharp"],k:i,i:/::/,c:[e.C("///","$",{rB:!0,c:[{cN:"doctag",v:[{b:"///",r:0},{b:""},{b:""}]}]}),e.CLCM,e.CBCM,{cN:"meta",b:"#",e:"$",k:{"meta-keyword":"if else elif endif define undef warning error line region endregion pragma checksum"}},l,e.CNM,{bK:"class interface",e:/[{;=]/,i:/[^\s:]/,c:[e.TM,e.CLCM,e.CBCM]},{bK:"namespace",e:/[{;=]/,i:/[^\s:]/,c:[e.inherit(e.TM,{b:"[a-zA-Z](\\.?\\w)*"}),e.CLCM,e.CBCM]},{cN:"meta",b:"^\\s*\\[",eB:!0,e:"\\]",eE:!0,c:[{cN:"meta-string",b:/"/,e:/"/}]},{bK:"new return throw await else",r:0},{cN:"function",b:"("+b+"\\s+)+"+e.IR+"\\s*\\(",rB:!0,e:/[{;=]/,eE:!0,k:i,c:[{b:e.IR+"\\s*\\(",rB:!0,c:[e.TM],r:0},{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,k:i,r:0,c:[l,e.CNM,e.CBCM]},e.CLCM,e.CBCM]}]}});hljs.registerLanguage("coffeescript",function(e){var c={keyword:"in if for while finally new do return else break catch instanceof throw try this switch continue typeof delete debugger super yield import export from as default await then unless until loop of by when and or is isnt not",literal:"true false null undefined yes no on off",built_in:"npm require console print module global window document"},n="[A-Za-z$_][0-9A-Za-z$_]*",r={cN:"subst",b:/#\{/,e:/}/,k:c},i=[e.BNM,e.inherit(e.CNM,{starts:{e:"(\\s*/)?",r:0}}),{cN:"string",v:[{b:/'''/,e:/'''/,c:[e.BE]},{b:/'/,e:/'/,c:[e.BE]},{b:/"""/,e:/"""/,c:[e.BE,r]},{b:/"/,e:/"/,c:[e.BE,r]}]},{cN:"regexp",v:[{b:"///",e:"///",c:[r,e.HCM]},{b:"//[gim]*",r:0},{b:/\/(?![ *])(\\\/|.)*?\/[gim]*(?=\W|$)/}]},{b:"@"+n},{sL:"javascript",eB:!0,eE:!0,v:[{b:"```",e:"```"},{b:"`",e:"`"}]}];r.c=i;var s=e.inherit(e.TM,{b:n}),t="(\\(.*\\))?\\s*\\B[-=]>",o={cN:"params",b:"\\([^\\(]",rB:!0,c:[{b:/\(/,e:/\)/,k:c,c:["self"].concat(i)}]};return{aliases:["coffee","cson","iced"],k:c,i:/\/\*/,c:i.concat([e.C("###","###"),e.HCM,{cN:"function",b:"^\\s*"+n+"\\s*=\\s*"+t,e:"[-=]>",rB:!0,c:[s,o]},{b:/[:\(,=]\s*/,r:0,c:[{cN:"function",b:t,e:"[-=]>",rB:!0,c:[o]}]},{cN:"class",bK:"class",e:"$",i:/[:="\[\]]/,c:[{bK:"extends",eW:!0,i:/[:="\[\]]/,c:[s]},s]},{b:n+":",e:":",rB:!0,rE:!0,r:0}])}});hljs.registerLanguage("sql",function(e){var t=e.C("--","$");return{cI:!0,i:/[<>{}*#]/,c:[{bK:"begin end 
start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup revoke comment",e:/;/,eW:!0,l:/[\w\.]+/,k:{keyword:"abort abs absolute acc acce accep accept access accessed accessible account acos action activate add addtime admin administer advanced advise aes_decrypt aes_encrypt after agent aggregate ali alia alias allocate allow alter always analyze ancillary and any anydata anydataset anyschema anytype apply archive archived archivelog are as asc ascii asin assembly assertion associate asynchronous at atan atn2 attr attri attrib attribu attribut attribute attributes audit authenticated authentication authid authors auto autoallocate autodblink autoextend automatic availability avg backup badfile basicfile before begin beginning benchmark between bfile bfile_base big bigfile bin binary_double binary_float binlog bit_and bit_count bit_length bit_or bit_xor bitmap blob_base block blocksize body both bound buffer_cache buffer_pool build bulk by byte byteordermark bytes cache caching call calling cancel capacity cascade cascaded case cast catalog category ceil ceiling chain change changed char_base char_length character_length characters characterset charindex charset charsetform charsetid check checksum checksum_agg child choose chr chunk class cleanup clear client clob clob_base clone close cluster_id cluster_probability cluster_set clustering coalesce coercibility col collate collation collect colu colum column column_value columns columns_updated comment commit compact compatibility compiled complete composite_limit compound compress compute concat concat_ws concurrent confirm conn connec connect connect_by_iscycle connect_by_isleaf connect_by_root connect_time connection consider consistent constant constraint constraints constructor container content contents context contributors controlfile conv convert convert_tz corr corr_k corr_s corresponding corruption cos cost count count_big counted covar_pop covar_samp cpu_per_call cpu_per_session crc32 create creation critical cross cube cume_dist curdate current current_date current_time current_timestamp current_user cursor curtime customdatum cycle data database databases datafile datafiles datalength date_add date_cache date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts day day_to_second dayname dayofmonth dayofweek dayofyear days db_role_change dbtimezone ddl deallocate declare decode decompose decrement decrypt deduplicate def defa defau defaul default defaults deferred defi defin define degrees delayed delegate delete delete_all delimited demand dense_rank depth dequeue des_decrypt des_encrypt des_key_file desc descr descri describ describe descriptor deterministic diagnostics difference dimension direct_load directory disable disable_all disallow disassociate discardfile disconnect diskgroup distinct distinctrow distribute distributed div do document domain dotnet double downgrade drop dumpfile duplicate duration each edition editionable editions element ellipsis else elsif elt empty enable enable_all enclosed encode encoding encrypt end end-exec endian enforced engine engines enqueue enterprise entityescaping eomonth error errors escaped evalname evaluate event eventdata events except exception exceptions exchange 
exclude excluding execu execut execute exempt exists exit exp expire explain export export_set extended extent external external_1 external_2 externally extract failed failed_login_attempts failover failure far fast feature_set feature_value fetch field fields file file_name_convert filesystem_like_logging final finish first first_value fixed flash_cache flashback floor flush following follows for forall force form forma format found found_rows freelist freelists freepools fresh from from_base64 from_days ftp full function general generated get get_format get_lock getdate getutcdate global global_name globally go goto grant grants greatest group group_concat group_id grouping grouping_id groups gtid_subtract guarantee guard handler hash hashkeys having hea head headi headin heading heap help hex hierarchy high high_priority hosts hour http id ident_current ident_incr ident_seed identified identity idle_time if ifnull ignore iif ilike ilm immediate import in include including increment index indexes indexing indextype indicator indices inet6_aton inet6_ntoa inet_aton inet_ntoa infile initial initialized initially initrans inmemory inner innodb input insert install instance instantiable instr interface interleaved intersect into invalidate invisible is is_free_lock is_ipv4 is_ipv4_compat is_not is_not_null is_used_lock isdate isnull isolation iterate java join json json_exists keep keep_duplicates key keys kill language large last last_day last_insert_id last_value lax lcase lead leading least leaves left len lenght length less level levels library like like2 like4 likec limit lines link list listagg little ln load load_file lob lobs local localtime localtimestamp locate locator lock locked log log10 log2 logfile logfiles logging logical logical_reads_per_call logoff logon logs long loop low low_priority lower lpad lrtrim ltrim main make_set makedate maketime managed management manual map mapping mask master master_pos_wait match matched materialized max maxextents maximize maxinstances maxlen maxlogfiles maxloghistory maxlogmembers maxsize maxtrans md5 measures median medium member memcompress memory merge microsecond mid migration min minextents minimum mining minus minute minvalue missing mod mode model modification modify module monitoring month months mount move movement multiset mutex name name_const names nan national native natural nav nchar nclob nested never new newline next nextval no no_write_to_binlog noarchivelog noaudit nobadfile nocheck nocompress nocopy nocycle nodelay nodiscardfile noentityescaping noguarantee nokeep nologfile nomapping nomaxvalue nominimize nominvalue nomonitoring none noneditionable nonschema noorder nopr nopro noprom nopromp noprompt norely noresetlogs noreverse normal norowdependencies noschemacheck noswitch not nothing notice notrim novalidate now nowait nth_value nullif nulls num numb numbe nvarchar nvarchar2 object ocicoll ocidate ocidatetime ociduration ociinterval ociloblocator ocinumber ociref ocirefcursor ocirowid ocistring ocitype oct octet_length of off offline offset oid oidindex old on online only opaque open operations operator optimal optimize option optionally or oracle oracle_date oradata ord ordaudio orddicom orddoc order ordimage ordinality ordvideo organization orlany orlvary out outer outfile outline output over overflow overriding package pad parallel parallel_enable parameters parent parse partial partition partitions pascal passing password password_grace_time password_lock_time password_reuse_max password_reuse_time 
password_verify_function patch path patindex pctincrease pctthreshold pctused pctversion percent percent_rank percentile_cont percentile_disc performance period period_add period_diff permanent physical pi pipe pipelined pivot pluggable plugin policy position post_transaction pow power pragma prebuilt precedes preceding precision prediction prediction_cost prediction_details prediction_probability prediction_set prepare present preserve prior priority private private_sga privileges procedural procedure procedure_analyze processlist profiles project prompt protection public publishingservername purge quarter query quick quiesce quota quotename radians raise rand range rank raw read reads readsize rebuild record records recover recovery recursive recycle redo reduced ref reference referenced references referencing refresh regexp_like register regr_avgx regr_avgy regr_count regr_intercept regr_r2 regr_slope regr_sxx regr_sxy reject rekey relational relative relaylog release release_lock relies_on relocate rely rem remainder rename repair repeat replace replicate replication required reset resetlogs resize resource respect restore restricted result result_cache resumable resume retention return returning returns reuse reverse revoke right rlike role roles rollback rolling rollup round row row_count rowdependencies rowid rownum rows rtrim rules safe salt sample save savepoint sb1 sb2 sb4 scan schema schemacheck scn scope scroll sdo_georaster sdo_topo_geometry search sec_to_time second section securefile security seed segment select self sequence sequential serializable server servererror session session_user sessions_per_user set sets settings sha sha1 sha2 share shared shared_pool short show shrink shutdown si_averagecolor si_colorhistogram si_featurelist si_positionalcolor si_stillimage si_texture siblings sid sign sin size size_t sizes skip slave sleep smalldatetimefromparts smallfile snapshot some soname sort soundex source space sparse spfile split sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_small_result sql_variant_property sqlcode sqldata sqlerror sqlname sqlstate sqrt square standalone standby start starting startup statement static statistics stats_binomial_test stats_crosstab stats_ks_test stats_mode stats_mw_test stats_one_way_anova stats_t_test_ stats_t_test_indep stats_t_test_one stats_t_test_paired stats_wsr_test status std stddev stddev_pop stddev_samp stdev stop storage store stored str str_to_date straight_join strcmp strict string struct stuff style subdate subpartition subpartitions substitutable substr substring subtime subtring_index subtype success sum suspend switch switchoffset switchover sync synchronous synonym sys sys_xmlagg sysasm sysaux sysdate sysdatetimeoffset sysdba sysoper system system_user sysutcdatetime table tables tablespace tan tdo template temporary terminated tertiary_weights test than then thread through tier ties time time_format time_zone timediff timefromparts timeout timestamp timestampadd timestampdiff timezone_abbr timezone_minute timezone_region to to_base64 to_date to_days to_seconds todatetimeoffset trace tracking transaction transactional translate translation treat trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse type ub1 ub2 ub4 ucase unarchived unbounded uncompress under undo unhex unicode uniform uninstall union unique unix_timestamp unknown unlimited unlock unpivot unrecoverable unsafe unsigned until untrusted unusable unused update updated upgrade upped upper upsert url urowid 
usable usage use use_stored_outlines user user_data user_resources users using utc_date utc_timestamp uuid uuid_short validate validate_password_strength validation valist value values var var_samp varcharc vari varia variab variabl variable variables variance varp varraw varrawc varray verify version versions view virtual visible void wait wallet warning warnings week weekday weekofyear wellformed when whene whenev wheneve whenever where while whitespace with within without work wrapped xdb xml xmlagg xmlattributes xmlcast xmlcolattval xmlelement xmlexists xmlforest xmlindex xmlnamespaces xmlpi xmlquery xmlroot xmlschema xmlserialize xmltable xmltype xor year year_to_month years yearweek",literal:"true false null",built_in:"array bigint binary bit blob boolean char character date dec decimal float int int8 integer interval number numeric real record serial serial8 smallint text varchar varying void"},c:[{cN:"string",b:"'",e:"'",c:[e.BE,{b:"''"}]},{cN:"string",b:'"',e:'"',c:[e.BE,{b:'""'}]},{cN:"string",b:"`",e:"`",c:[e.BE]},e.CNM,e.CBCM,t]},e.CBCM,t]}});hljs.registerLanguage("apache",function(e){var r={cN:"number",b:"[\\$%]\\d+"};return{aliases:["apacheconf"],cI:!0,c:[e.HCM,{cN:"section",b:""},{cN:"attribute",b:/\w+/,r:0,k:{nomarkup:"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername"},starts:{e:/$/,r:0,k:{literal:"on off all"},c:[{cN:"meta",b:"\\s\\[",e:"\\]$"},{cN:"variable",b:"[\\$%]\\{",e:"\\}",c:["self",r]},r,e.QSM]}}],i:/\S/}});hljs.registerLanguage("haskell",function(e){var i={v:[e.C("--","$"),e.C("{-","-}",{c:["self"]})]},a={cN:"meta",b:"{-#",e:"#-}"},l={cN:"meta",b:"^#",e:"$"},c={cN:"type",b:"\\b[A-Z][\\w']*",r:0},n={b:"\\(",e:"\\)",i:'"',c:[a,l,{cN:"type",b:"\\b[A-Z][\\w]*(\\((\\.\\.|,|\\w+)\\))?"},e.inherit(e.TM,{b:"[_a-z][\\w']*"}),i]},s={b:"{",e:"}",c:n.c};return{aliases:["hs"],k:"let in if then else case of where do module import hiding qualified type data newtype deriving class instance as default infix infixl infixr foreign export ccall stdcall cplusplus jvm dotnet safe unsafe family forall mdo proc rec",c:[{bK:"module",e:"where",k:"module where",c:[n,i],i:"\\W\\.|;"},{b:"\\bimport\\b",e:"$",k:"import qualified as hiding",c:[n,i],i:"\\W\\.|;"},{cN:"class",b:"^(\\s*)?(class|instance)\\b",e:"where",k:"class family instance where",c:[c,n,i]},{cN:"class",b:"\\b(data|(new)?type)\\b",e:"$",k:"data family type newtype deriving",c:[a,c,n,s,i]},{bK:"default",e:"$",c:[c,n,i]},{bK:"infix infixl infixr",e:"$",c:[e.CNM,i]},{b:"\\bforeign\\b",e:"$",k:"foreign import export ccall stdcall cplusplus jvm dotnet safe unsafe",c:[c,e.QSM,i]},{cN:"meta",b:"#!\\/usr\\/bin\\/env runhaskell",e:"$"},a,l,e.QSM,e.CNM,c,e.inherit(e.TM,{b:"^[_a-z][\\w']*"}),i,{b:"->|<-"}]}});hljs.registerLanguage("scala",function(e){var t={cN:"meta",b:"@[A-Za-z]+"},a={cN:"subst",v:[{b:"\\$[A-Za-z0-9_]+"},{b:"\\${",e:"}"}]},r={cN:"string",v:[{b:'"',e:'"',i:"\\n",c:[e.BE]},{b:'"""',e:'"""',r:10},{b:'[a-z]+"',e:'"',i:"\\n",c:[e.BE,a]},{cN:"string",b:'[a-z]+"""',e:'"""',c:[a],r:10}]},c={cN:"symbol",b:"'\\w[\\w\\d_]*(?!')"},i={cN:"type",b:"\\b[A-Z][A-Za-z0-9_]*",r:0},s={cN:"title",b:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,r:0},n={cN:"class",bK:"class object trait type",e:/[:={\[\n;]/,eE:!0,c:[{bK:"extends 
with",r:10},{b:/\[/,e:/\]/,eB:!0,eE:!0,r:0,c:[i]},{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,r:0,c:[i]},s]},l={cN:"function",bK:"def",e:/[:={\[(\n;]/,eE:!0,c:[s]};return{k:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if forSome for while throw finally protected extends import final return else break new catch super class case package default try this match continue throws implicit"},c:[e.CLCM,e.CBCM,r,c,i,l,n,e.CNM,t]}}); diff --git a/docs/theme/index.hbs b/docs/theme/index.hbs deleted file mode 100644 index 50150a9f9d..0000000000 --- a/docs/theme/index.hbs +++ /dev/null @@ -1,231 +0,0 @@ - - - - - - {{ title }} - - - - - - - - - - - - - - - - - - - - - - {{#each additional_css}} - - {{/each}} - - {{#if mathjax_support}} - - - {{/if}} - - - - - - - - - - - - - - - - -
diff --git a/docs/vercel.json b/docs/vercel.json
new file mode 100644
index 0000000000..a5e4dd8e90
--- /dev/null
+++ b/docs/vercel.json
@@ -0,0 +1,4 @@
+{
+  "name": "PROJECT_NAME",
+  "scope": "solana-labs"
+}