Merge with latest master, bug fixes, cleanup

Mariano Sorgente 2019-11-20 16:28:26 +09:00
commit dfd3fd9676
32 changed files with 351 additions and 278 deletions

8
.gitignore vendored

@ -8,6 +8,10 @@ __pycache__/
**/*.o
**/*.DS_Store
# Database
db/
nohup.out
# VDF executables
lib/chiavdf/fast_vdf/compile_asm
lib/chiavdf/fast_vdf/vdf
@ -17,8 +21,8 @@ lib/chiavdf/fast_vdf/vdf_server
lib/chiavdf/fast_vdf/flint
# Keys and plot files
src/config/keys.yaml
src/config/plots.yaml
config/keys.yaml
config/plots.yaml
# PyInstaller


@ -7,6 +7,7 @@ Python 3.7 is used for this project. Make sure your python version is >=3.7 by t
# for Debian-based distros
sudo apt-get install build-essential cmake python3-dev python3-venv --no-install-recommends
git clone https://github.com/Chia-Network/chia-blockchain.git
git submodule update --init --recursive
python3 -m venv .venv
. .venv/bin/activate
@ -26,7 +27,7 @@ mongod --dbpath ./db/
### Generate keys
First, create some keys by running the following script:
```bash
python -m src.scripts.regenerate_keys
python -m scripts.regenerate_keys
```
### Run a full node
@ -34,7 +35,7 @@ To run a full node on port 8002, and connect to the testnet, run the following c
This will also start an SSH server on port 8222 for the UI; you can connect to it
to see the state of the node.
```bash
python -m src.server.start_full_node "127.0.0.1" 8002 -u 8222
python -m src.server.start_full_node "127.0.0.1" 8002 -id 1 -u 8222 &
ssh -p 8222 localhost
```
@ -44,34 +45,35 @@ blocks (like Bitcoin's miners), and earn block rewards. First, you must generate
can take a long time depending on the size of the plots. Then, run the farmer + full node with
the following script. A full node is also started on port 8002, which you can ssh into to view the node UI.
```bash
python -m src.scrips.create_plots -k 20 -n 10
sh ./src/scripts/simulate_farming.sh
python -m scripts.create_plots -k 20 -n 10
sh ./scripts/simulate_farming.sh
```
### Run a timelord + full node
Timelords execute sequential verifiable delay functions (proofs of time) that get added to
blocks to make them valid. This requires fast CPUs and a lot of memory; a toy sketch of the sequential idea follows the command below.
```bash
sh ./src/scripts/simulate_farming.sh
sh ./scripts/simulate_farming.sh
```
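The real VDF runs over class groups of binary quadratic forms (built in lib/chiavdf), but the property the timelord depends on can be illustrated with plain modular squaring. This toy sketch is ours, not the project's code:
```python
# Toy illustration of a sequential VDF: each squaring depends on the
# previous result, so the work cannot be parallelized across cores,
# only sped up with a faster CPU (hence the hardware note above).
def toy_vdf(x: int, n: int, iterations: int) -> int:
    for _ in range(iterations):
        x = (x * x) % n
    return x

# All 100000 squarings must happen in order:
print(toy_vdf(2, 1_000_003, 100_000))
```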
### Tips
When running the servers on Mac OS, allow the application to accept incoming connections.
Try running one of the full nodes a few minutes after the other ones, to test initial sync.
Configuration of peers can be changed in src/config/config.yaml.
Configuration of peers can be changed in config/config.yaml.
You can also run the simulation, which runs all servers and multiple full nodes at once.
```bash
sh ./src/scripts/simulate_network.sh
sh ./scripts/simulate_network.sh
```
### Run tests and linting
The first time the tests are run, BlockTools will create and persist many plots. These are used for creating
proofs of space during testing. The next time tests are run, this won't be necessary.
```bash
py.test tests -s -v
black src tests
flake8 src
mypy src tests
py.test tests -s -v
```
@ -81,4 +83,5 @@ mypy src tests
3. Install mypy plugin
4. Preferences > Settings > Python > Linting > flake8 enabled
5. Preferences > Settings > Python > Linting > mypy enabled
6. Preferences > Settings > Formatting > Python > Provider > black
7. Preferences > Settings > mypy > Targets: set to ./src and ./tests


@ -53,7 +53,7 @@ full_node:
# When we are > 1 but < sync_blocks_behind_threshold, download these many blocks
short_sync_download_count: 6
# This SSH key is for the ui SSH server
ssh_filename: "src/config/ssh_host_key"
ssh_filename: "config/ssh_host_key"
# How often to connect to introducer if we need to learn more peers
introducer_connect_interval: 60
# Continue trying to connect to more peers until this number of connections

7
config/keys.yaml Normal file

@ -0,0 +1,7 @@
farmer_sk: 05b8a304a8778fb4718e1ee373824a0902168beb461097b671c63f1d9d1ada6a
farmer_target: c045ebfb7db00d3917c60382a12fbc6b038722d4df3d87ff1b2be3c4e2265ce2
pool_sks:
- 67b924be46e29146a3ece870685da62afb9c2a70c8cbab1ca831d248ba36d88f
- 188fd95f8118f56c651f733b10863402bdb76554d5c6f5eb4ee5750a0ab9ed10
pool_target: 1954909f6c90f0f9e2175ccd8608d8a42944d90bd50c81294ee4dac373460cd1
sk_seed: 0704de986fa718afbc24a827e2beef0022bb25180bbb58b84996bf0c9b05c4ae

61
config/plots.yaml Normal file

@ -0,0 +1,61 @@
plots:
plot-0-20-130c6f5dbbab179c724ec6156ae0ee00cfd8b0635e70046e72dea772472a9b06.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 5181f34796be1eae1c57c39ad60d7e6ed86fedcf9b283c699e768cc03caf3b23
plot-0-20-fc595ec4405f97c7433b4c93bd5eb6c0dfc4513160760bc031be4be4dbec9acf.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 3a162118570db6a52cd46e7514ff6f14bbf0ecff08ca8cbb69cd1f51d58567a4
plot-1-20-c6d6564f7c7af14dba95c34067d6f315d15d1c1dd60388851095d6ed5b6bfaa3.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 328a14dad8cfd59f3ee85f1a072f54f55d26d6db459b541fe245cb5b895b7152
plot-1-20-d8c49e7754f7725f398ed28cee6e0c1643eec2a9f1a96c3e3dce887817072d38.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 072d0168be97d51eb5a255912674a4ce0bc8b792f1bc9646b8c805aecf29e3e8
plot-2-20-26f1f052a304d88a8cdd6c12b3ecdda137c9e01e1d9eb55da0f5e15b0deb1b1d.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 675c6af6fbf9ceb3a0ea9be9b3229424d0be67d8cdd18a9ac2bd24ab04ec0327
plot-2-20-f55a4cfbc4eeac460aa31a0f6ecddd9e7e4d20e6e57c222715cb3ba34b648e3a.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 27781bee55f372e532e722f44f77b177d417e5e0c6a30253bb7d547565e096ed
plot-3-20-c8bfdaaf4c33c3929fe7ffd3401f3a7d098a7ddc3450c3e7684a61f4033f364b.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 7164a8814244c1c3d208d67f2466fab83ae518103026ec5b70d2393b6afffdf6
plot-3-20-f62b64ef138b62c16f6237f8ad21fde78677d63e7a8da40748e614dc4e0dd39a.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 3f27508a5c54f5b759ce8c23d20873f7eeabe099c376b3f2353bd40f470833b1
plot-4-20-25158b54fdfa12357a4f259310f69af7587ce2bd8d1aee13b3771267115a9613.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 20a6d5b8b8149419d3abbcce3a8581fa153fba7422903bf88864b613a9b08612
plot-4-20-3b1e32f777f98206e580c3cb5316c81ca889d3b0ffd8ffc92233608862378c83.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 14d4a192cc1d21833161d48bcd1854f887ea2505f1703260543d3c36cda6655a
plot-5-20-49f6dbd5273f6ecbfa917fff3d328806682a9859b955eaaea78e704d015591c4.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 41ff4eb2213cb9fb01da7d9dca1a9181fc822d84c317d18206e314d4d99004a9
plot-5-20-ea00f9ada1bf0773dac219c5e29d404cbec15899a0c835641fd05ba2e05d3e8e.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 2a2661aa35fb331a49779aff7e4ecd40ed4e18342cb3c88de7d48811fabac886
plot-6-20-6a508fc80b1b0cf0407348a4c3be2cf720c3534bad3bdd0e655d600e2bd84b87.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 15332ef3d5012b096c6703f952bfda62d709aecb124a16eccdbdbceed19dd77f
plot-6-20-97abe3376e416f00a0cfc031902b5d145425fab814dcd58b463edd07c59d0922.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 28a19383993cec5e450c25df3fd87b4d80ba12d1a12c056cc48c6b79b6ead1ee
plot-7-20-18b17a80a34a554ec5beb2738bb0599af4a1d043beb1571478bc43ebc14ebad5.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 23f170bcfcd9af343a6128247ec3ec058f88efc01d6a5b5f6b4e5fa1ce76ee85
plot-7-20-62e6daaf7f6e80f9ffe0a69b8992ab7040a2127df8f093f7f851afe107c4a6ac.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 31225161bf28f0dd73964158aa293384d53c1c342390d2eb4ffa5d6b7932ee80
plot-8-20-b9f32bef90960327043edb64c8e595ae85c74fe32a4fb2eefe701c4a01774978.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 72dfadf874660d44d83639df581cf8bfc96d4ccbce0140bc841d58a4e3d89974
plot-8-20-e3d281eddfb1a0e0184ca97ed597c662f2496e2ae57b45b11d7f80fe39b0cbb4.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 09051981a61cf7a975242da3501a680b18754c0eaa76f41e8072cd87e27bf71c
plot-9-20-22c0aca0956d448ff24b2b596fa8905ab4a45bba2002129ac7609b9c333cb540.dat:
pool_pk: 05608090e35a2978ab23607916f6198b093f48db4dc2aafa42a579e3f7a282c4cf34efdad9144bc7b4138a7ee1bc1e3a
sk: 2ddfab8c1b1fee42d3de2d5626cdb5d6970987077aa9893a4d270b07d483c11c
plot-9-20-7a43ae10741684efafd338e084f06f8bb99c189d84c640851cf147370db34745.dat:
pool_pk: 0d655c0b87e239211e684b8d60d52f378f1759099f0ac85b1531cdc8f8aad9450fbbdb5d8453cb03d8febbc7fb2b41fc
sk: 728c9c746bcce4f4f5e376806bb7938664abdb4d8651cf766ac103e6168b0be9

0
src/scripts/common.sh → scripts/common.sh Normal file → Executable file


@ -11,8 +11,8 @@ from src.types.proof_of_space import ProofOfSpace
from src.types.sized_bytes import bytes32
plot_root = os.path.join(ROOT_DIR, "plots")
plot_config_filename = os.path.join(ROOT_DIR, "src", "config", "plots.yaml")
key_config_filename = os.path.join(ROOT_DIR, "src", "config", "keys.yaml")
plot_config_filename = os.path.join(ROOT_DIR, "config", "plots.yaml")
key_config_filename = os.path.join(ROOT_DIR, "config", "keys.yaml")
def main():
@ -32,7 +32,7 @@ def main():
# We need the keys file to access the pool keys (if they exist) and the sk_seed.
args = parser.parse_args()
if not os.path.isfile(key_config_filename):
raise RuntimeError("Keys not generated. Run ./src/scripts/regenerate_keys.py.")
raise RuntimeError("Keys not generated. Run ./scripts/regenerate_keys.py.")
# The seed is what will be used to generate a private key for each plot
key_config = safe_load(open(key_config_filename, "r"))
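The per-plot sk values in plots.yaml above are derived deterministically from this seed, so the same seed regenerates the same plot keys. The exact scheme lives in create_plots.py; here is a hypothetical sketch of the idea, with hashlib standing in for the real BLS key generation:
```python
# Hypothetical derivation for illustration only (not the project's
# actual scheme): hash the seed together with the plot index to get a
# deterministic 32-byte secret per plot.
import hashlib

def derive_plot_secret(sk_seed: bytes, index: int) -> bytes:
    return hashlib.sha256(sk_seed + index.to_bytes(4, "big")).digest()

sk_seed = bytes.fromhex(
    "0704de986fa718afbc24a827e2beef0022bb25180bbb58b84996bf0c9b05c4ae"
)
print(derive_plot_secret(sk_seed, 0).hex())
```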


@ -8,7 +8,7 @@ from yaml import safe_dump, safe_load
from definitions import ROOT_DIR
key_config_filename = os.path.join(ROOT_DIR, "src", "config", "keys.yaml")
key_config_filename = os.path.join(ROOT_DIR, "config", "keys.yaml")
def str2bool(v: str) -> bool:


@ -1,10 +1,10 @@
. .venv/bin/activate
. src/scripts/common.sh
. scripts/common.sh
# Starts a harvester, farmer, and full node.
_run_bg_cmd python -m src.server.start_harvester
_run_bg_cmd python -m src.server.start_farmer
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -f -u 8222
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -id 1 -f -u 8222
wait

7
scripts/simulate_full_node.sh Executable file

@ -0,0 +1,7 @@
. .venv/bin/activate
. scripts/common.sh
# Starts a full node
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -id 1 -f -u 8222
wait


@ -1,28 +0,0 @@
. .venv/bin/activate
_kill_servers() {
ps -e | grep python | awk '{print $1}' | xargs -L1 kill
ps -e | grep "vdf_server" | awk '{print $1}' | xargs -L1 kill
}
_kill_servers
python -m src.server.start_full_node "127.0.0.1" 8002 -id 1 -f &
P4=$!
python -m src.server.start_full_node "127.0.0.1" 8004 -id 2 -t -u 8222 &
P5=$!
python -m src.server.start_full_node "127.0.0.1" 8005 -id 3 &
P6=$!
_term() {
echo "Caught SIGTERM signal, killing all servers."
kill -TERM "$P4" 2>/dev/null
kill -TERM "$P5" 2>/dev/null
kill -TERM "$P6" 2>/dev/null
_kill_servers
}
trap _term SIGTERM
trap _term SIGINT
trap _term INT
wait $P4 $P5 $P6


@ -1,37 +1,14 @@
. .venv/bin/activate
. scripts/common.sh
_kill_servers() {
ps -e | grep python | awk '{print $1}' | xargs -L1 kill
ps -e | grep "vdf_server" | awk '{print $1}' | xargs -L1 kill
}
# Starts a harvester, farmer, timelord, introducer, and 3 full nodes.
_kill_servers
_run_bg_cmd python -m src.server.start_harvester
_run_bg_cmd python -m src.server.start_timelord
_run_bg_cmd python -m src.server.start_farmer
_run_bg_cmd python -m src.server.start_introducer
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -id 1 -t -u 8222
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8002 -id 2 -f
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8005 -id 3
python -m src.server.start_plotter &
P1=$!
python -m src.server.start_timelord &
P2=$!
python -m src.server.start_farmer &
P3=$!
python -m src.server.start_full_node "127.0.0.1" 8002 -id 1 -f &
P4=$!
python -m src.server.start_full_node "127.0.0.1" 8004 -id 2 -t -u 8222 &
P5=$!
python -m src.server.start_full_node "127.0.0.1" 8005 -id 3 &
P6=$!
_term() {
echo "Caught SIGTERM signal, killing all servers."
kill -TERM "$P1" 2>/dev/null
kill -TERM "$P2" 2>/dev/null
kill -TERM "$P3" 2>/dev/null
kill -TERM "$P4" 2>/dev/null
kill -TERM "$P5" 2>/dev/null
kill -TERM "$P6" 2>/dev/null
_kill_servers
}
trap _term SIGTERM
trap _term SIGINT
trap _term INT
wait $P1 $P2 $P3 $P4 $P5 $P6
wait


@ -1,28 +0,0 @@
. .venv/bin/activate
_kill_servers() {
ps -e | grep python | awk '{print $1}' | xargs -L1 kill
ps -e | grep "vdf_server" | awk '{print $1}' | xargs -L1 kill
}
_kill_servers
python -m src.server.start_plotter &
P1=$!
python -m src.server.start_timelord &
P2=$!
python -m src.server.start_farmer &
P3=$!
_term() {
echo "Caught SIGTERM signal, killing all servers."
kill -TERM "$P1" 2>/dev/null
kill -TERM "$P2" 2>/dev/null
kill -TERM "$P3" 2>/dev/null
_kill_servers
}
trap _term SIGTERM
trap _term SIGINT
trap _term INT
wait $P1 $P2 $P3


@ -1,9 +1,9 @@
. .venv/bin/activate
. src/scripts/common.sh
. scripts/common.sh
# Starts a timelord, and a full node
_run_bg_cmd python -m src.server.start_timelord
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -t -u 8222
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -id 1 -t -u 8222
wait


@ -1,2 +1,3 @@
ps -e | grep python | awk '{print $1}' | xargs -L1 kill
ps -e | grep "vdf_server" | awk '{print $1}' | xargs -L1 kill
# Stops all python servers and VDF processes running on this machine
. scripts/common.sh


@ -1,7 +1,7 @@
#!/usr/bin/python3
from setuptools import setup
dependencies = ["blspy", "cbor2", "pyyaml", "asyncssh"]
dependencies = ["blspy", "cbor2", "pyyaml", "asyncssh", "motor"]
dev_dependencies = [
"pytest",
"flake8",


@ -47,11 +47,11 @@ class Blockchain:
self.store = store
self.tips: List[FullBlock] = []
self.lca_block: FullBlock
self.height_to_hash: Dict[uint64, bytes32] = {}
self.height_to_hash: Dict[uint32, bytes32] = {}
async def initialize(self):
async for block in self.store.get_blocks():
if not self.tips or block.height > self.tips[0].height:
if not self.tips or block.weight > self.tips[0].weight:
self.tips = [block]
# TODO: are cases where the blockchain "fans out" handled appropriately?
self.height_to_hash[block.height] = block.header_hash
@ -116,9 +116,11 @@ class Blockchain:
if height > curr_block.height:
raise ValueError("Height is not valid for tip {tip_header_hash}")
while height < curr_block.height:
curr_block = (
await self.store.get_block(curr_block.header.data.prev_header_hash)
).header_block
fetched: Optional[FullBlock] = await self.store.get_block(
curr_block.header.data.prev_header_hash
)
assert fetched is not None
curr_block = fetched.header_block
headers.append((index, curr_block))
return [b for index, b in sorted(headers)]
@ -128,13 +130,15 @@ class Blockchain:
where both blockchains are equal.
"""
lca: HeaderBlock = self.lca_block.header_block
assert lca.height < alternate_chain[-1].height
if lca.height >= alternate_chain[-1].height:
raise ValueError("Alternate chain is shorter")
low = 0
high = lca.height
while low + 1 < high:
mid = (low + high) // 2
if (
self.height_to_hash[uint64(mid)]
self.height_to_hash[uint32(mid)]
!= alternate_chain[mid].header.get_hash()
):
high = mid
@ -142,13 +146,13 @@ class Blockchain:
low = mid
if low == high and low == 0:
assert (
self.height_to_hash[uint64(0)] == alternate_chain[0].header.get_hash()
self.height_to_hash[uint32(0)] == alternate_chain[0].header.get_hash()
)
return alternate_chain[0]
assert low + 1 == high
if self.height_to_hash[uint64(low)] == alternate_chain[low].header.get_hash():
if self.height_to_hash[uint32(low)] == alternate_chain[low].header.get_hash():
if (
self.height_to_hash[uint64(high)]
self.height_to_hash[uint32(high)]
== alternate_chain[high].header.get_hash()
):
return alternate_chain[high]
@ -156,7 +160,7 @@ class Blockchain:
return alternate_chain[low]
elif low > 0:
assert (
self.height_to_hash[uint64(low - 1)]
self.height_to_hash[uint32(low - 1)]
== alternate_chain[low - 1].header.get_hash()
)
return alternate_chain[low - 1]
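Because two chains that agree at a given height also agree at every height below it, the fork point can be found by binary search, which is what the code above implements. A self-contained sketch, with a plain dict and list standing in for the store and header hashes:
```python
# Sketch of the fork-point search: return the highest height at which
# our chain (height -> header hash) provably matches the alternate
# chain. Assumes both chains share the same genesis hash.
from typing import Dict, List

def find_fork_height(height_to_hash: Dict[int, bytes],
                     alternate_hashes: List[bytes], lca_height: int) -> int:
    assert height_to_hash[0] == alternate_hashes[0]
    low, high = 0, lca_height
    while low + 1 < high:
        mid = (low + high) // 2
        if height_to_hash[mid] != alternate_hashes[mid]:
            high = mid  # chains have already diverged at mid
        else:
            low = mid   # chains are still equal at mid
    # Equality is known at `low`; `high` may or may not still match.
    return high if height_to_hash[high] == alternate_hashes[high] else low
```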
@ -172,7 +176,7 @@ class Blockchain:
if block is None:
raise Exception("Given header_hash must reference block already added")
next_height: uint32 = block.height + 1
next_height: uint32 = uint32(block.height + 1)
if next_height < self.constants["DIFFICULTY_EPOCH"]:
# We are in the first epoch
return uint64(self.constants["DIFFICULTY_STARTING"])
@ -186,6 +190,10 @@ class Blockchain:
):
# Not at a point where difficulty would change
prev_block = await self.store.get_block(block.prev_header_hash)
assert block.header_block.challenge is not None
assert (
prev_block is not None and prev_block.header_block.challenge is not None
)
if prev_block is None:
raise Exception("Previous block is invalid.")
return uint64(
@ -197,16 +205,16 @@ class Blockchain:
# ----------|-----|----------------------|-----|-----...
# h1 h2 h3 i-1
# Height1 is the last block 2 epochs ago, so we can include the time to mine 1st block in previous epoch
height1 = uint64(
height1 = uint32(
next_height
- self.constants["DIFFICULTY_EPOCH"]
- self.constants["DIFFICULTY_DELAY"]
- 1
)
# Height2 is the DIFFICULTY DELAYth block in the previous epoch
height2 = uint64(next_height - self.constants["DIFFICULTY_EPOCH"] - 1)
height2 = uint32(next_height - self.constants["DIFFICULTY_EPOCH"] - 1)
# Height3 is the last block in the previous epoch
height3 = uint64(next_height - self.constants["DIFFICULTY_DELAY"] - 1)
height3 = uint32(next_height - self.constants["DIFFICULTY_DELAY"] - 1)
# h1 to h2 timestamps are mined on the previous difficulty, while h2 to h3 timestamps are mined on the
# current difficulty
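Concretely, with illustrative constants (not the project's real values), the three reference heights work out like this:
```python
# Illustrative values only; the real ones live in src/consensus/constants.
DIFFICULTY_EPOCH = 128  # hypothetical epoch length, in blocks
DIFFICULTY_DELAY = 32   # hypothetical delay, in blocks
next_height = 513

height1 = next_height - DIFFICULTY_EPOCH - DIFFICULTY_DELAY - 1  # last block 2 epochs ago
height2 = next_height - DIFFICULTY_EPOCH - 1                     # DIFFICULTY_DELAYth block of previous epoch
height3 = next_height - DIFFICULTY_DELAY - 1                     # last block of previous epoch
assert (height1, height2, height3) == (352, 384, 480)
```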
@ -218,7 +226,8 @@ class Blockchain:
):
# This means we are either on a fork, or on one of the chains, but after the LCA,
# so we manually backtrack.
curr = block
curr: Optional[FullBlock] = block
assert curr is not None
while (
curr.height not in self.height_to_hash
or self.height_to_hash[curr.height] != curr.header_hash
@ -251,7 +260,7 @@ class Blockchain:
else:
# In the case of height == -1, there is no timestamp here, so assume the genesis block
# took constants["BLOCK_TIME_TARGET"] seconds to mine.
genesis = await self.store.get_block(self.height_to_hash[uint64(0)])
genesis = await self.store.get_block(self.height_to_hash[uint32(0)])
assert genesis is not None
timestamp1 = (
genesis.header_block.header.data.timestamp
@ -304,20 +313,20 @@ class Blockchain:
the number of iterations of the last epoch, and changes at the same block as the difficulty.
"""
block = await self.store.get_block(header_hash)
if block is None:
if block is None or block.header_block.challenge is None:
raise Exception("Given header_hash must reference block already added")
next_height: uint32 = block.height + 1
next_height: uint32 = uint32(block.height + 1)
if next_height < self.constants["DIFFICULTY_EPOCH"]:
# First epoch has a hardcoded vdf speed
return self.constants["VDF_IPS_STARTING"]
prev_block = await self.store.get_block(block.prev_header_hash)
if prev_block is None:
if prev_block is None or prev_block.header_block.challenge is None:
raise Exception("Previous block is invalid.")
proof_of_space = block.header_block.proof_of_space
difficulty = await self.get_next_difficulty(prev_block.header_hash)
iterations = (
iterations = uint64(
block.header_block.challenge.total_iters
- prev_block.header_block.challenge.total_iters
)
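Since total_iters is cumulative, the speed estimate reduces to iterations over elapsed time; with made-up numbers:
```python
# Made-up numbers: cumulative iteration counts and timestamps of two blocks.
iters1, timestamp1 = 1_000_000, 1_574_200_000  # older block
iters2, timestamp2 = 1_600_000, 1_574_200_200  # newer block, 200 s later

ips = (iters2 - iters1) // (timestamp2 - timestamp1)
assert ips == 3000  # matches the store's default proof_of_time_estimate_ips
```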
@ -339,23 +348,25 @@ class Blockchain:
# block of the last epochs. Basically, it's total iterations over time, of previous epoch.
# Height1 is the last block 2 epochs ago, so we can include the iterations taken for mining first block in epoch
height1 = uint64(
height1 = uint32(
next_height
- self.constants["DIFFICULTY_EPOCH"]
- self.constants["DIFFICULTY_DELAY"]
- 1
)
# Height2 is the last block in the previous epoch
height2 = uint64(next_height - self.constants["DIFFICULTY_DELAY"] - 1)
height2 = uint32(next_height - self.constants["DIFFICULTY_DELAY"] - 1)
block1, block2 = None, None
block1: Optional[FullBlock] = None
block2: Optional[FullBlock] = None
if (
block.header_block not in self.get_current_tips()
or height2 not in self.height_to_hash
):
# This means we are either on a fork, or on one of the chains, but after the LCA,
# so we manually backtrack.
curr = block
curr: Optional[FullBlock] = block
assert curr is not None
while (
curr.height not in self.height_to_hash
or self.height_to_hash[curr.height] != curr.header_hash
@ -367,25 +378,28 @@ class Blockchain:
curr = await self.store.get_block(curr.prev_header_hash)
assert curr is not None
# Once we are before the fork point (and before the LCA), we can use the height_to_hash map
if not block1 and height1 >= 0:
if block1 is None and height1 >= 0:
# height1 could be -1, for the first difficulty calculation
block1 = await self.store.get_block(self.height_to_hash[height1])
if not block2:
if block2 is None:
block2 = await self.store.get_block(self.height_to_hash[height2])
assert block2 is not None
assert block2.header_block.challenge is not None
if block1:
if block1 is not None:
assert block1.header_block.challenge is not None
timestamp1 = block1.header_block.header.data.timestamp
iters1 = block1.header_block.challenge.total_iters
else:
# In the case of height == -1, there is no timestamp here, so assume the genesis block
# took constants["BLOCK_TIME_TARGET"] seconds to mine.
genesis = await self.store.get_block(self.height_to_hash[uint64(0)])
genesis = await self.store.get_block(self.height_to_hash[uint32(0)])
assert genesis is not None
timestamp1 = (
genesis.header_block.header.data.timestamp
- self.constants["BLOCK_TIME_TARGET"]
)
assert genesis.header_block.challenge is not None
iters1 = genesis.header_block.challenge.total_iters
timestamp2 = block2.header_block.header.data.timestamp
@ -588,7 +602,9 @@ class Blockchain:
return False
if not genesis:
prev_block: FullBlock = await self.store.get_block(block.prev_header_hash)
prev_block: Optional[FullBlock] = await self.store.get_block(
block.prev_header_hash
)
if not prev_block or not prev_block.header_block.challenge:
return False
@ -643,28 +659,32 @@ class Blockchain:
curr_old: Optional[HeaderBlock] = old_lca.header_block if old_lca else None
curr_new: HeaderBlock = new_lca.header_block
while True:
fetched: Optional[FullBlock]
if not curr_old or curr_old.height < curr_new.height:
self.height_to_hash[uint64(curr_new.height)] = curr_new.header_hash
self.height_to_hash[uint32(curr_new.height)] = curr_new.header_hash
if curr_new.height == 0:
return
curr_new = (
await self.store.get_block(curr_new.prev_header_hash)
).header_block
fetched = await self.store.get_block(curr_new.prev_header_hash)
assert fetched is not None
curr_new = fetched.header_block
elif curr_old.height > curr_new.height:
del self.height_to_hash[uint64(curr_old.height)]
curr_old = (
await self.store.get_block(curr_old.prev_header_hash)
).header_block
del self.height_to_hash[uint32(curr_old.height)]
fetched = await self.store.get_block(curr_old.prev_header_hash)
assert fetched is not None
curr_old = fetched.header_block
else:
if curr_new.header_hash == curr_old.header_hash:
return
self.height_to_hash[uint64(curr_new.height)] = curr_new.header_hash
curr_new = (
await self.store.get_block(curr_new.prev_header_hash)
).header_block
curr_old = (
await self.store.get_block(curr_old.prev_header_hash)
).header_block
self.height_to_hash[uint32(curr_new.height)] = curr_new.header_hash
fetched_new: Optional[FullBlock] = await self.store.get_block(
curr_new.prev_header_hash
)
fetched_old: Optional[FullBlock] = await self.store.get_block(
curr_old.prev_header_hash
)
assert fetched_new is not None and fetched_old is not None
curr_new = fetched_new.header_block
curr_old = fetched_old.header_block
async def _reconsider_lca(self, genesis: bool):
"""
@ -675,7 +695,11 @@ class Blockchain:
while any(b.header_hash != cur[0].header_hash for b in cur):
heights = [b.height for b in cur]
i = heights.index(max(heights))
cur[i] = await self.store.get_block(cur[i].prev_header_hash)
fetched: Optional[FullBlock] = await self.store.get_block(
cur[i].prev_header_hash
)
assert fetched is not None
cur[i] = fetched
if genesis:
await self._reconsider_heights(None, cur[0])
else:


@ -42,17 +42,32 @@ class FullNodeStore(Database):
super().__init__(db_name)
# Stored on database
# All full blocks which have been added to the blockchain. Header_hash -> block
self.full_blocks = self.db.get_collection("full_blocks")
self.potential_heads = self.db.get_collection("potential_heads")
# Potential new tips that we have received from others.
self.potential_tips = self.db.get_collection("potential_tips")
# Header blocks received from other peers during sync
self.potential_headers = self.db.get_collection("potential_headers")
# Blocks received from other peers during sync
self.potential_blocks = self.db.get_collection("potential_blocks")
# Blocks which we have created, but don't have proof of space yet
self.candidate_blocks = self.db.get_collection("candidate_blocks")
# Blocks which are not finalized yet (no proof of time)
self.unfinished_blocks = self.db.get_collection("unfinished_blocks")
# Whether or not we are syncing
self.sync_mode = self.db.get_collection("sync_mode")
# Stored in memory
self.unfinished_blocks_leader = None
# Our best unfinished block
self.unfinished_blocks_leader: Tuple[uint32, uint64] = (
uint32(0),
uint64(9999999999),
)
# Event to signal when blocks are received at each height
self.potential_blocks_received: Dict[uint32, asyncio.Event] = {}
# Blocks that we have finalized during sync, queue them up for adding after sync is done
self.potential_future_blocks: List[FullBlock] = []
# Current estimate of the speed of the network timelords
self.proof_of_time_estimate_ips: uint64 = uint64(3000)
# Lock
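These collections come from motor, the async MongoDB driver newly added to setup.py above. A minimal standalone sketch of the upsert pattern that add_potential_tip uses below, assuming a local mongod as started by the README's `mongod --dbpath ./db/` step:
```python
# Minimal motor sketch: upsert a serialized block keyed by its header
# hash, mirroring add_potential_tip. Requires a mongod on localhost.
import asyncio
from motor import motor_asyncio

async def demo() -> None:
    client = motor_asyncio.AsyncIOMotorClient("localhost", 27017)
    tips = client["demo_db"].get_collection("potential_tips")
    header_hash, block_bytes = b"\x00" * 32, b"<serialized block>"  # stand-ins
    await tips.find_one_and_update(
        {"_id": header_hash}, {"$set": {"block": block_bytes}}, upsert=True
    )
    print(await tips.count_documents({}))

asyncio.run(demo())
```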
@ -60,7 +75,7 @@ class FullNodeStore(Database):
async def _clear_database(self):
await self.full_blocks.drop()
await self.potential_heads.drop()
await self.potential_tips.drop()
await self.potential_headers.drop()
await self.potential_blocks.drop()
await self.candidate_blocks.drop()
@ -100,16 +115,15 @@ class FullNodeStore(Database):
await self.set_sync_mode(sync_mode)
async def clear_sync_info(self):
await self.potential_heads.drop()
await self.potential_tips.drop()
await self.potential_headers.drop()
await self.potential_blocks.drop()
self.potential_blocks_received.clear()
self.potential_future_blocks.clear()
async def get_potential_heads_number(self) -> int:
return await self.potential_heads.count_documents({})
async def get_potential_heads_tuples(self) -> List[Tuple[bytes32, FullBlock]]:
async def get_potential_tips_tuples(self) -> List[Tuple[bytes32, FullBlock]]:
ans = []
async for query in self.potential_heads.find({}):
async for query in self.potential_tips.find({}):
if query and "block" in query:
block = FullBlock.from_bytes(query["block"])
else:
@ -117,16 +131,14 @@ class FullNodeStore(Database):
ans.append((bytes32(query["_id"]), block))
return ans
async def add_potential_head(
self, header_hash: bytes32, block: Optional[FullBlock] = None
) -> None:
action = {"$set": {"block": block} if block else {"_id": header_hash}}
await self.potential_heads.find_one_and_update(
{"_id": header_hash}, action, upsert=True
async def add_potential_tip(self, block: FullBlock) -> None:
action = {"$set": {"block": block}}
await self.potential_tips.find_one_and_update(
{"_id": block.header_hash}, action, upsert=True
)
async def get_potential_head(self, header_hash: bytes32) -> Optional[FullBlock]:
query = await self.potential_heads.find_one({"_id": header_hash})
async def get_potential_tip(self, header_hash: bytes32) -> Optional[FullBlock]:
query = await self.potential_tips.find_one({"_id": header_hash})
block = query.get("block", None) if query else None
return FullBlock.from_bytes(block) if block else None
@ -158,6 +170,12 @@ class FullNodeStore(Database):
async def get_potential_blocks_received(self, height: uint32) -> asyncio.Event:
return self.potential_blocks_received[height]
async def add_potential_future_block(self, block: FullBlock):
self.potential_future_blocks.append(block)
async def get_potential_future_blocks(self):
return self.potential_future_blocks
async def add_candidate_block(
self, pos_hash: bytes32, body: Body, header: HeaderData, pos: ProofOfSpace,
):
@ -194,10 +212,18 @@ class FullNodeStore(Database):
query = await self.unfinished_blocks.find_one({"_id": code})
return FullBlock.from_bytes(query["block"]) if query else None
async def get_unfinished_blocks(self) -> Dict[Tuple[bytes32, uint64], FullBlock]:
d = {}
async for document in self.unfinished_blocks.find({}):
challenge_hash = document["_id"][:32]
iters = uint64(int.from_bytes(document["_id"][32:], byteorder="big"))
d[(challenge_hash, iters)] = FullBlock.from_bytes(document["block"])
return d
def set_unfinished_block_leader(self, key: Tuple[bytes32, uint64]) -> None:
self.unfinished_blocks_leader = key
def get_unfinished_block_leader(self) -> Optional[Tuple[bytes32, uint64]]:
def get_unfinished_block_leader(self) -> Tuple[bytes32, uint64]:
return self.unfinished_blocks_leader
async def set_proof_of_time_estimate_ips(self, estimate: uint64):
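The `_id` that get_unfinished_blocks decodes above is simply the 32-byte challenge hash concatenated with the big-endian iteration count. Hypothetical helpers showing the packing and its inverse:
```python
# Hypothetical helpers illustrating the (challenge_hash, iterations)
# <-> _id packing: 32 bytes of hash, then the count in big-endian.
from typing import Tuple

def pack_unfinished_key(challenge_hash: bytes, iters: int) -> bytes:
    assert len(challenge_hash) == 32
    return challenge_hash + iters.to_bytes(8, byteorder="big")

def unpack_unfinished_key(key: bytes) -> Tuple[bytes, int]:
    return key[:32], int.from_bytes(key[32:], byteorder="big")

key = pack_unfinished_key(b"\xab" * 32, 1000)
assert unpack_unfinished_key(key) == (b"\xab" * 32, 1000)
```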


@ -28,11 +28,11 @@ HARVESTER PROTOCOL (FARMER <-> HARVESTER)
class Farmer:
def __init__(self):
config_filename = os.path.join(ROOT_DIR, "src", "config", "config.yaml")
key_config_filename = os.path.join(ROOT_DIR, "src", "config", "keys.yaml")
config_filename = os.path.join(ROOT_DIR, "config", "config.yaml")
key_config_filename = os.path.join(ROOT_DIR, "config", "keys.yaml")
if not os.path.isfile(key_config_filename):
raise RuntimeError(
"Keys not generated. Run ./src/scripts/regenerate_keys.py."
"Keys not generated. Run ./scripts/regenerate_keys.py."
)
self.config = safe_load(open(config_filename, "r"))["farmer"]
self.key_config = safe_load(open(key_config_filename, "r"))
@ -76,7 +76,7 @@ class Farmer:
self.proof_of_time_estimate_ips,
constants["MIN_BLOCK_TIME"],
)
if height < 300: # As the difficulty adjusts, don't fetch all qualities
if height < 500: # As the difficulty adjusts, don't fetch all qualities
if challenge_response.challenge_hash not in self.challenge_to_best_iters:
self.challenge_to_best_iters[
challenge_response.challenge_hash


@ -29,6 +29,7 @@ from src.types.header import Header, HeaderData
from src.types.header_block import HeaderBlock
from src.types.peer_info import PeerInfo
from src.types.sized_bytes import bytes32
from src.types.proof_of_space import ProofOfSpace
from src.util import errors
from src.util.api_decorators import api_request
from src.util.errors import (
@ -46,7 +47,7 @@ class FullNode:
blockchain: Blockchain
def __init__(self, store: FullNodeStore, blockchain: Blockchain):
config_filename = os.path.join(ROOT_DIR, "src", "config", "config.yaml")
config_filename = os.path.join(ROOT_DIR, "config", "config.yaml")
self.config = yaml.safe_load(open(config_filename, "r"))["full_node"]
self.store = store
self.blockchain = blockchain
@ -94,22 +95,38 @@ class FullNode:
self, delivery: Delivery = Delivery.BROADCAST
) -> AsyncGenerator[OutboundMessage, None]:
"""
Sends all of the current heads to all timelord peers.
Sends all of the current heads (as well as PoS infos) to all timelord peers.
"""
requests: List[timelord_protocol.ChallengeStart] = []
challenge_requests: List[timelord_protocol.ChallengeStart] = []
pos_info_requests: List[timelord_protocol.ProofOfSpaceInfo] = []
async with self.store.lock:
for head in self.blockchain.get_current_tips():
assert head.challenge
challenge_hash = head.challenge.get_hash()
requests.append(
tips: List[HeaderBlock] = self.blockchain.get_current_tips()
for tip in tips:
assert tip.challenge
challenge_hash = tip.challenge.get_hash()
challenge_requests.append(
timelord_protocol.ChallengeStart(
challenge_hash, head.challenge.total_weight
challenge_hash, tip.challenge.total_weight
)
)
for request in requests:
tip_hashes = [tip.header_hash for tip in tips]
tip_infos = [
tup[0]
for tup in list((await self.store.get_unfinished_blocks()).items())
if tup[1].prev_header_hash in tip_hashes
]
for chall, iters in tip_infos:
pos_info_requests.append(
timelord_protocol.ProofOfSpaceInfo(chall, iters)
)
for challenge_msg in challenge_requests:
yield OutboundMessage(
NodeType.TIMELORD, Message("challenge_start", request), delivery
NodeType.TIMELORD, Message("challenge_start", challenge_msg), delivery
)
for pos_info_msg in pos_info_requests:
yield OutboundMessage(
NodeType.TIMELORD, Message("proof_of_space_info", pos_info_msg), delivery
)
async def _on_connect(self) -> AsyncGenerator[OutboundMessage, None]:
@ -187,15 +204,19 @@ class FullNode:
highest_weight: uint64 = uint64(0)
tip_block: FullBlock
tip_height = 0
caught_up = False
# Based on responses from peers about the current heads, see which head is the heaviest
# (similar to longest chain rule).
async with self.store.lock:
potential_heads = (await self.store.get_potential_heads()).items()
log.info(f"Have collected {len(potential_heads)} potential heads")
for header_hash, _ in potential_heads:
block = await self.store.get_potential_heads_full_block(header_hash)
potential_tips: List[
Tuple[bytes32, FullBlock]
] = await self.store.get_potential_tips_tuples()
log.info(f"Have collected {len(potential_tips)} potential tips")
for header_hash, block in potential_tips:
if block.header_block.challenge is None:
raise ValueError(f"Invalid tip block {block.header_hash} received")
if block.header_block.challenge.total_weight > highest_weight:
highest_weight = block.header_block.challenge.total_weight
tip_block = block
@ -204,15 +225,18 @@ class FullNode:
[t.weight for t in self.blockchain.get_current_tips()]
):
log.info("Not performing sync, already caught up.")
await self.store.set_sync_mode(False)
await self.store.clear_sync_info()
return
caught_up = True
if caught_up:
async for msg in self._finish_sync():
yield msg
return
assert tip_block
log.info(f"Tip block {tip_block.header_hash} tip height {tip_block.height}")
# Now, we download all of the headers in order to verify the weight
# TODO: use queue here, request a few at a time
# TODO: send multiple API calls out at once
timeout = 20
timeout = 30
sleep_interval = 3
total_time_slept = 0
headers: List[HeaderBlock] = []
@ -270,7 +294,7 @@ class FullNode:
# Only download from fork point (what we don't have)
async with self.store.lock:
have_block = (
await self.store.get_potential_heads_full_block(
await self.store.get_potential_tip(
headers[height].header.get_hash()
)
is not None
@ -311,7 +335,7 @@ class FullNode:
async with self.store.lock:
# TODO: ban peers that provide bad blocks
if have_block:
block = await self.store.get_potential_head(
block = await self.store.get_potential_tip(
headers[height].header.get_hash()
)
@ -336,12 +360,27 @@ class FullNode:
await self.store.set_proof_of_time_estimate_ips(
await self.blockchain.get_next_ips(block.header_hash)
)
log.info(f"Finished sync up to height {tip_height}")
async for msg in self._finish_sync():
yield msg
async def _finish_sync(self):
"""
Finalize sync by setting sync mode to False, clearing all sync information, and adding any final
blocks that we have finalized recently.
"""
async with self.store.lock:
log.info(f"Finished sync up to height {tip_height}")
potential_fut_blocks = (
await self.store.get_potential_future_blocks()
).copy()
await self.store.set_sync_mode(False)
await self.store.clear_sync_info()
for block in potential_fut_blocks:
async for msg in self.block(peer_protocol.Block(block)):
yield msg
# Update farmers and timelord with most recent information
async for msg in self._send_challenges_to_timelords():
yield msg
@ -556,17 +595,16 @@ class FullNode:
we call the unfinished_block routine.
"""
async with self.store.lock:
if (
await self.store.get_candidate_block(header_signature.pos_hash)
) is None:
candidate: Optional[
Tuple[Body, HeaderData, ProofOfSpace]
] = await self.store.get_candidate_block(header_signature.pos_hash)
if candidate is None:
log.warning(
f"PoS hash {header_signature.pos_hash} not found in database"
)
return
# Verifies that we have the correct header and body stored
block_body, block_header_data, pos = await self.store.get_candidate_block(
header_signature.pos_hash
)
block_body, block_header_data, pos = candidate
assert block_header_data.get_hash() == header_signature.header_hash
@ -578,6 +616,7 @@ class FullNode:
# Propagate to ourselves (which validates and does further propagations)
request = peer_protocol.UnfinishedBlock(unfinished_block_obj)
log.error("Will call unf")
async for m in self.unfinished_block(request):
# Yield all new messages (propagation to peers)
yield m
@ -635,8 +674,15 @@ class FullNode:
new_header_block, unfinished_block_obj.body
)
async for msg in self.block(peer_protocol.Block(new_full_block)):
yield msg
async with self.store.lock:
sync_mode = await self.store.get_sync_mode()
if sync_mode:
async with self.store.lock:
await self.store.add_potential_future_block(new_full_block)
else:
async for msg in self.block(peer_protocol.Block(new_full_block)):
yield msg
# PEER PROTOCOL
@api_request
@ -680,6 +726,7 @@ class FullNode:
We can validate it and if it's a good block, propagate it to other peers and
timelords.
"""
log.error("CALED")
async with self.store.lock:
if not self.blockchain.is_child_of_head(unfinished_block.block):
return
@ -712,7 +759,7 @@ class FullNode:
if await self.store.get_unfinished_block(
(challenge_hash, iterations_needed)
):
) is not None:
return
expected_time: uint64 = uint64(
@ -725,15 +772,13 @@ class FullNode:
await asyncio.sleep(3)
async with self.store.lock:
leader: Tuple[
uint32, uint64
] = await self.store.get_unfinished_block_leader()
leader: Tuple[uint32, uint64] = self.store.get_unfinished_block_leader()
if leader is None or unfinished_block.block.height > leader[0]:
log.info(
f"This is the first block at height {unfinished_block.block.height}, so propagate."
)
# If this is the first block we see at this height, propagate
await self.store.set_unfinished_block_leader(
self.store.set_unfinished_block_leader(
(unfinished_block.block.height, expected_time)
)
elif unfinished_block.block.height == leader[0]:
@ -748,9 +793,7 @@ class FullNode:
f"New best unfinished block at height {unfinished_block.block.height}"
)
# If this will be the first block to finalize, update our leader
await self.store.set_unfinished_block_leader(
(leader[0], expected_time)
)
self.store.set_unfinished_block_leader((leader[0], expected_time))
else:
# If we have seen an unfinished block at a greater or equal height, don't propagate
log.info(f"Unfinished block at old height, so don't propagate")
@ -787,8 +830,8 @@ class FullNode:
async with self.store.lock:
if await self.store.get_sync_mode():
# Add the block to our potential heads list
await self.store.add_potential_head(header_hash, block.block)
# Add the block to our potential tips list
await self.store.add_potential_tip(block.block)
return
# Record our minimum height, and whether we have a full set of heads
least_height: uint32 = min(
@ -820,7 +863,7 @@ class FullNode:
):
async with self.store.lock:
await self.store.clear_sync_info()
await self.store.add_potential_head(header_hash, block.block)
await self.store.add_potential_tip(block.block)
log.info(
f"We are too far behind this block. Our height is {tip_height} and block is at "
f"{block.block.height}"
@ -836,9 +879,9 @@ class FullNode:
except BaseException as e:
log.warning(f"Error {type(e)}{e} with syncing")
finally:
async with (await self.store.get_lock()):
async with self.store.lock:
await self.store.set_sync_mode(False)
await self.store.clear_sync_information()
await self.store.clear_sync_info()
return
elif block.block.height > tip_height + 1:


@ -20,17 +20,17 @@ log = logging.getLogger(__name__)
class Harvester:
def __init__(self):
config_filename = os.path.join(ROOT_DIR, "src", "config", "config.yaml")
plot_config_filename = os.path.join(ROOT_DIR, "src", "config", "plots.yaml")
key_config_filename = os.path.join(ROOT_DIR, "src", "config", "keys.yaml")
config_filename = os.path.join(ROOT_DIR, "config", "config.yaml")
plot_config_filename = os.path.join(ROOT_DIR, "config", "plots.yaml")
key_config_filename = os.path.join(ROOT_DIR, "config", "keys.yaml")
if not os.path.isfile(key_config_filename):
raise RuntimeError(
"Keys not generated. Run ./src/scripts/regenerate_keys.py."
"Keys not generated. Run ./scripts/regenerate_keys.py."
)
if not os.path.isfile(plot_config_filename):
raise RuntimeError(
"Plots not generated. Run ./src/scripts/create_plots.py."
"Plots not generated. Run ./scripts/create_plots.py."
)
self.config = safe_load(open(config_filename, "r"))["harvester"]


@ -12,7 +12,7 @@ from src.util.api_decorators import api_request
class Introducer:
def __init__(self):
config_filename = os.path.join(ROOT_DIR, "src", "config", "config.yaml")
config_filename = os.path.join(ROOT_DIR, "config", "config.yaml")
self.config = yaml.safe_load(open(config_filename, "r"))["introducer"]
def set_server(self, server: ChiaServer):


@ -1,14 +0,0 @@
. .venv/bin/activate
. src/scripts/common.sh
# Starts a harvester, farmer, timelord, introducer, and 3 full nodes.
_run_bg_cmd python -m src.server.start_harvester
_run_bg_cmd python -m src.server.start_timelord
_run_bg_cmd python -m src.server.start_farmer
_run_bg_cmd python -m src.server.start_introducer
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8002 -f
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8444 -t -u 8222
_run_bg_cmd python -m src.server.start_full_node "127.0.0.1" 8005
wait


@ -1,3 +0,0 @@
# Stops all python servers and VDF processes running on this machine
. src/scripts/common.sh


@ -356,7 +356,7 @@ class ChiaServer:
# Read one message at a time, forever
yield (connection, message)
except asyncio.IncompleteReadError:
log.warning(
log.info(
f"Received EOF from {connection.get_peername()}, closing connection."
)
except ConnectionError:


@ -76,6 +76,10 @@ async def main():
full_node._start_bg_tasks()
log.info("Waiting to connect to some peers...")
await asyncio.sleep(3)
log.info(f"Connected to {len(server.global_connections.get_connections())} peers.")
if connect_to_farmer and not server_closed:
peer_info = PeerInfo(
full_node.config["farmer_peer"]["host"],
@ -90,10 +94,6 @@ async def main():
)
_ = await server.start_client(peer_info, None)
log.info("Waiting to connect to some peers...")
await asyncio.sleep(3)
log.info(f"Connected to {len(server.global_connections.get_connections())} peers.")
if not server_closed:
try:
async for msg in full_node._sync():


@ -26,7 +26,7 @@ log = logging.getLogger(__name__)
class Timelord:
def __init__(self):
config_filename = os.path.join(ROOT_DIR, "src", "config", "config.yaml")
config_filename = os.path.join(ROOT_DIR, "config", "config.yaml")
self.config = safe_load(open(config_filename, "r"))["timelord"]
self.free_servers: List[Tuple[str, str]] = list(
zip(self.config["vdf_server_ips"], self.config["vdf_server_ports"])


@ -5,6 +5,12 @@ from typing import Callable, List, Optional
import asyncssh
from prompt_toolkit import Application
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.key_binding.bindings.focus import (
focus_next,
focus_previous,
)
from prompt_toolkit.contrib.ssh import PromptToolkitSSHServer
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, VSplit, Window
from prompt_toolkit.layout.layout import Layout
@ -54,7 +60,7 @@ def start_ssh_server(
await ui.await_closed()
uis = uis[1:]
async def interact() -> None:
async def interact():
nonlocal uis, permenantly_closed
if permenantly_closed:
return
@ -104,7 +110,7 @@ class FullNodeUI:
self.parent_close_cb = parent_close_cb
self.kb = self.setup_keybindings()
self.draw_initial()
self.style = Style([("error", "#ff0044"),])
self.style = Style([("error", "#ff0044")])
self.app = Application(
style=self.style,
layout=self.layout,
@ -221,7 +227,7 @@ class FullNodeUI:
return inner
async def search_block(self, text: str):
async with (await self.store.get_lock()):
async with self.store.lock:
try:
block = await self.store.get_block(bytes.fromhex(text))
except ValueError:
@ -296,7 +302,7 @@ class FullNodeUI:
async with self.store.lock:
if await self.store.get_sync_mode():
max_height = -1
for _, block in await self.store.get_potential_heads_tuples():
for _, block in await self.store.get_potential_tips_tuples():
if block.height > max_height:
max_height = block.height


@ -32,14 +32,13 @@ test_constants: Dict[str, Any] = {
test_constants["GENESIS_BLOCK"] = bytes(
bt.create_genesis_block(test_constants, bytes([0] * 32), b"0")
)
# test_constants["GENESIS_BLOCK"] = b'\x15N3\xd3\xf9H\xc2K\x96\xfe\xf2f\xa2\xbf\x87\x0e\x0f,\xd0\xd4\x0f6s\xb1".\\\xf5\x8a\xb4\x03\x84\x8e\xf9\xbb\xa1\xca\xdef3:\xe4?\x0c\xe5\xc6\x12\x80\x17\xd2\xcc\xd7\xb4m\x94\xb7V\x959\xed4\x89\x04b\x08\x07^\xca`\x8f#%\xe9\x9c\x9d\x86y\x10\x96W\x9d\xce\xc1\x15r\x97\x91U\n\x11<\xdf\xb2\xfc\xfb<\x13\x00\x00\x00\x98\xf4\x88\xcb\xb2MYo]\xaf \xd8a>\x06\xfe\xc8F\x8d\x15\x90\x15\xbb\x04\xd48\x10\xc6\xd8b\x82\x88\x7fx<\xe5\xe6\x8b\x8f\x84\xdd\x1cU"\x83\xfb7\x9d`\xb0I\xb3\xbe;bvE\xc6\x92\xdd\xbe\x988\xe9y;\xc6.\xa1\xce\x94\xdc\xd8\xab\xaf\xba\x8f\xd8r\x8br\xc8\xa0\xac\xc0\xe9T\x87\x08\x08\x8b#-\xb6o\xf0\x1f\x0bzv\xb3\x81\x1a\xd4\xf7\x01\xdf\xc5A\x11\xe0\x0c\xc0\x87\xa6\xc2v\xbbR\xc4{"\xa5\xe5\xe0bx7\xfa\n\xae\xea\xfe\x02\xac\xef\xec\xd1\xc2\xc55\x06{\xe1\x0c\xb2\x99q\xd7\xd8\xcb\x97\x86\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xeb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Y\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x007\x03\x00\x00\x004\x00\x8c\xff\xc3\x00\x00\x00\x00\x00\x00\x01T\x00>\xff\xe3\x00\x80\x00[\x00\x00\x00\x00\x00\x00\x05R\x00\x08\x00\x05\x00j\xff\xfd\x00\x00\x00\x00\x00\x00\x17\xf0\x00j\xff\x99\x00j\x00\x03\x01\x03\xa1\xde8\x0f\xb75VB\xf6"`\x94\xc7\x0b\xaa\x1f\xa2Nv\x8a\xf9\xc9\x9a>\x13\xa3a\xc8\x0c\xcb?\x968\xc7\xeb\xc3\x10a\x1a\xa7\xfb\x85\xa7iu\x14`\x8f\x90\x16o\x97\xd5\t\xa4,\xe5\xed\xe1\x15\x86<\x9d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x1f\xeb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00]\xbf\xd7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\xa1\xde8\x0f\xb75VB\xf6"`\x94\xc7\x0b\xaa\x1f\xa2Nv\x8a\xf9\xc9\x9a>\x13\xa3a\xc8\x0c\xcb?\x13\x16J\xe5\xfc\xa9\x06\xe8A\xe9\xc0Ql\xfb\xaeF\xcd\xd6\xa7\x8ei\xc4\xfa\xd4i\x84\xee\xc9\xe2\xaa\xa4f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00OB!\x81)\xf0l\xbcg\xa3^\xef\x0e\xfc\xb7\x02\x80\xe4\xa9NO\x89\xa0\t\xc3C\xd9\xda\xff\xd7\t\xeebfC&8\x9c+n$\x00\xa4\xe85\x19\xb0\xf6\x18\xa1\xeeR\xae\xec 
\x82k\xe0v@;\x1c\xc14PMh\xfb\xe3\x1c\xbf\x84O\xcd\xbc\xc4\xb8\xeabz`\xf7\x06;\xf6q\x8b,\x18\tf~\xd1\x11l#\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\x8b)\xaa\x96x8\xd76J\xa6\x8b[\x98\t\xe0\\\xe3^7qD\x8c\xf5q\x08\xf2\xa2\xc9\xb03mvU\x1a\xe2\x181\x88\xfe\t\x03?\x12\xadj\x9d\xe8K\xb8!\xee\xe7e8\x82\xfb$\xf0Y\xfaJ\x10\x1f\x1a\xe5\xe9\xa8\xbb\xea\x87\xfc\xb12y\x94\x8d,\x16\xe4C\x02\xba\xe6\xac\x94{\xc4c\x07(\xb8\xeb\xab\xe3\xcfy{6\x98\t\xf4\x8fm\xd62\x85\x87\xb0\x03f\x01B]\xe3\xc6\x13l6\x8d\x0e\x18\xc64%\x97\x1a\xa6\xf4\x8b)\xaa\x96x8\xd76J\xa6\x8b[\x98\t\xe0\\\xe3^7qD\x8c\xf5q\x08\xf2\xa2\xc9\xb03mv\x00\x00\x00\x00\x00\x00\x00\x00\x00_\xec\xebf\xff\xc8o8\xd9Rxlmily\xc2\xdb\xc29\xddN\x91\xb4g)\xd7:\'\xfbW\xe9'
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
# loop.close()
class TestGenesisBlock:


@ -1,25 +1,16 @@
import asyncio
import pytest
from bson.binary import Binary
from bson.codec_options import CodecOptions, TypeRegistry
from motor import motor_asyncio
from src.util.ints import uint32, uint64
from src.consensus.constants import constants
from src.database import FullNodeStore
from src.types.body import Body
from src.types.full_block import FullBlock
from src.types.proof_of_space import ProofOfSpace
from src.types.sized_bytes import bytes32
from src.util.ints import uint32, uint64
from src.util.streamable import Streamable
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
class TestDatabase:
@ -40,14 +31,10 @@ class TestDatabase:
# clear sync info
await db.clear_sync_info()
assert await db.get_potential_heads_number() == 0
# add/get potential head, get potential heads num
await db.add_potential_head(genesis.header_hash)
assert await db.get_potential_heads_number() == 1
await db.add_potential_head(genesis.header_hash, genesis)
assert await db.get_potential_heads_number() == 1
assert genesis == await db.get_potential_head(genesis.header_hash)
# add/get potential tip, get potential tips num
await db.add_potential_tip(genesis)
assert genesis == await db.get_potential_tip(genesis.header_hash)
# add/get potential trunk
header = genesis.header_block
@ -56,7 +43,7 @@ class TestDatabase:
# Add potential block
await db.add_potential_block(genesis)
assert genesis == await db.get_potential_block(0)
assert genesis == await db.get_potential_block(uint32(0))
# Add/get candidate block
assert await db.get_candidate_block(0) is None
@ -69,12 +56,13 @@ class TestDatabase:
assert await db.get_candidate_block(genesis.header_hash) == partial
# Add/get unfinished block
key = (genesis.header_hash, 1000)
key = (genesis.header_hash, uint64(1000))
assert await db.get_unfinished_block(key) is None
await db.add_unfinished_block(key, genesis)
assert await db.get_unfinished_block(key) == genesis
assert len(await db.get_unfinished_blocks()) == 1
# Set/get unf block leader
assert db.get_unfinished_block_leader() is None
assert db.get_unfinished_block_leader() == (0, 9999999999)
db.set_unfinished_block_leader(key)
assert db.get_unfinished_block_leader() == key


@ -29,9 +29,9 @@ class TestStreamable(unittest.TestCase):
class TestClass2(Streamable):
a: uint32
b: uint32
c: str
c: bytes
a = TestClass2(uint32(1), uint32(2), "3")
a = TestClass2(uint32(1), uint32(2), b"3")
try:
bytes(a)
assert False