First work version android

This commit is contained in:
parent 029d80d528
commit 43375e69a0

Cargo.toml (18 lines changed)
@@ -1,17 +1,9 @@
 [package]
 name = "hydrabadger"
 version = "0.1.0"
-authors = ["c0gent <nsan1129@gmail.com>"]
+authors = ["KORuL <korul1@yandex.ru>"]
 autobins = false

-# [[bin]]
-# name = "simulation"
-# path = "src/bin/simulation.rs"
-
-#[[bin]]
-#name = "peer_node"
-#path = "src/bin/peer_node.rs"
-
 #
 [target.'cfg(target_os="android")'.dependencies]
 jni = { version = "0.5", default-features = false }

@@ -28,7 +20,8 @@ exit_upon_epoch_1000 = []

 [dependencies]
 log = "*"
-# env_logger = "*"
+[target.'cfg(target_os = "android")'.dependencies]
+android_logger = { git = "https://github.com/Nercury/android_logger-rs" }
 env_logger = "0.5"
 clap = "*"
 failure = "*"

@@ -61,14 +54,9 @@ clear_on_drop = "*"

 [dependencies.hbbft]
 version = "*"
-# git = "https://github.com/c0gent/hbbft"
 git = "https://github.com/poanetwork/hbbft"
-# branch = "c0gent-supertraits"
 # branch = "master"
 branch = "add-mlock-error-handling"
-# branch = "afck-agreement"
-# path = "../hbbft"
-# features = ["serialization-protobuf"]

 [profile.release]
 debug = true
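The `[target.'cfg(target_os = "android")'.dependencies]` table above makes `android_logger` (and `jni`) available only when cross-compiling for Android, while `env_logger` stays available everywhere. A minimal sketch (not part of this commit) of how such a target-gated logger is typically consumed; the `init_logging` helper name is hypothetical, while the `init_once` and `Builder` calls mirror those appearing later in this diff:

```rust
#[cfg(target_os = "android")]
extern crate android_logger;
extern crate env_logger;
#[macro_use]
extern crate log;

// Hypothetical helper: install a logger appropriate for the build target so
// that `warn!`/`debug!` output reaches logcat on Android and stderr elsewhere.
#[cfg(target_os = "android")]
fn init_logging() {
    use android_logger::Filter;
    use log::Level;
    android_logger::init_once(Filter::default().with_min_level(Level::Trace), None);
}

#[cfg(not(target_os = "android"))]
fn init_logging() {
    // "warn" is an arbitrary default filter chosen for the sketch, not taken from the commit.
    env_logger::Builder::new().parse("warn").try_init().ok();
}
```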
README.md (38 lines changed)

@@ -1,18 +1,24 @@
-# Hydrabadger
+# tokio_android

-An experimental peer-to-peer client using the [Honey Badger Byzantine Fault
-Tolerant consensus algorithm](https://github.com/poanetwork/hbbft).
+An attempt to run a tokio-based example on Android.

 ## Usage

-### Compile
+### Compile for Android

-1. `git clone -b android git@github.com:poanetwork/hydrabadger.git`
-2. `cd hydrabadger`
+0. Download:
+   * Android SDK Tools
+   * NDK
+   * CMake
+   * LLDB
+
+1. `git clone git@github.com:KORuL/tokio_android.git`
+2. `cd tokio_android`

 3. Set the required environment variables:

 `export ANDROID_HOME=/Users/$USER/Library/Android/sdk`

 `export NDK_HOME=$ANDROID_HOME/ndk-bundle`

 etc.

@@ -53,29 +59,19 @@ etc.

 It may also be necessary to switch the reed-solomon-erasure package to its dev branch.

+### Compile for Linux
+
+1. `cargo build`
+
 ### Current State

-Network initialization node addition, transaction generation, consensus,
-and batch outputs are all generally working. Batch outputs for each epoch are
-printed to the log.
-
-Overall the client is fragile and doesn't handle deviation from simple usage
-very well yet.
+Compiles for Android and works fine on Linux, but crashes on Android.

 ### Test run Linux .so

 1. `cargo build`
 2. `./runTestPy`

-### Unimplemented
-
-* **Many edge cases and exceptions:** disconnects, reconnects, etc.
-* Connecting to a network which is in the process of key generation causes
-  the entire network to fail. For now, wait until the network starts
-  outputting batches before connecting additional peer nodes.
-* **Error handling** is atrocious, most errors are simply printed to the log.
-* **Usage as a library** is still a work in progress as the API settles.
-* **Much, much more...**
-
 ### License
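The `Usage` steps above ultimately exercise the entry points exported from `src/lib.rs` (shown later in this diff): plain `extern` functions that the Python/ctypes harness loads from `libhydrabadger.so`, plus JNI wrappers that the Android app calls. A condensed sketch of that surface, assuming the crate is built as a `cdylib`; the function bodies are elided:

```rust
// Condensed from src/lib.rs in this commit (bodies elided).
#[macro_use]
extern crate log;

#[no_mangle]
pub extern fn startNode1() {
    warn!("enter to startNode1");
    // ... binds 127.0.0.1:3000 and blocks in tokio::run via Hydrabadger::run_node ...
}

/// JNI wrapper invoked from the Android app's MainActivity.
#[cfg(target_os = "android")]
pub mod android {
    use super::*;
    use jni::objects::JClass;
    use jni::sys::jboolean;
    use jni::JNIEnv;

    #[no_mangle]
    pub unsafe extern fn Java_ru_hintsolution_hbbft_hbbft_MainActivity_startNode1(
        _env: JNIEnv,
        _: JClass,
    ) -> jboolean {
        startNode1();
        1
    }
}
```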
compile (6 lines changed)

@@ -7,3 +7,9 @@
 cargo build --target aarch64-linux-android --release
 cargo build --target armv7-linux-androideabi --release
 cargo build --target i686-linux-android --release
+
+
+
+cp -f /home/user/hbbft/greetings/hydrabadger_android/target/aarch64-linux-android/release/libhydrabadger.so /home/user/AndroidStudioProjects/hbbft/app/src/main/jniLibs/arm64/libhydrabadger.so
+cp -f /home/user/hbbft/greetings/hydrabadger_android/target/armv7-linux-androideabi/release/libhydrabadger.so /home/user/AndroidStudioProjects/hbbft/app/src/main/jniLibs/armeabi/libhydrabadger.so
+cp -f /home/user/hbbft/greetings/hydrabadger_android/target/i686-linux-android/release/libhydrabadger.so /home/user/AndroidStudioProjects/hbbft/app/src/main/jniLibs/x86/libhydrabadger.so

@@ -7,3 +7,8 @@
 cargo build --target aarch64-linux-android
 cargo build --target armv7-linux-androideabi
 cargo build --target i686-linux-android
+
+
+cp -f /home/user/hbbft/greetings/hydrabadger_android/target/aarch64-linux-android/debug/libhydrabadger.so /home/user/AndroidStudioProjects/hbbft/app/src/main/jniLibs/arm64/libhydrabadger.so
+cp -f /home/user/hbbft/greetings/hydrabadger_android/target/armv7-linux-androideabi/debug/libhydrabadger.so /home/user/AndroidStudioProjects/hbbft/app/src/main/jniLibs/armeabi/libhydrabadger.so
+cp -f /home/user/hbbft/greetings/hydrabadger_android/target/i686-linux-android/debug/libhydrabadger.so /home/user/AndroidStudioProjects/hbbft/app/src/main/jniLibs/x86/libhydrabadger.so
@@ -1,6 +0,0 @@
-#!/usr/bin/env python
-
-from ctypes import *
-cdll.LoadLibrary("./target/debug/libhydrabadger.so")
-libc = CDLL("./target/debug/libhydrabadger.so")
-libc.rust_main1()

@@ -1,6 +0,0 @@
-#!/usr/bin/env python
-
-from ctypes import *
-cdll.LoadLibrary("./target/debug/libhydrabadger.so")
-libc = CDLL("./target/debug/libhydrabadger.so")
-libc.rust_main2()

@@ -1,6 +0,0 @@
-#!/usr/bin/env python
-
-from ctypes import *
-cdll.LoadLibrary("./target/debug/libhydrabadger.so")
-libc = CDLL("./target/debug/libhydrabadger.so")
-libc.rust_main3()

@@ -1,8 +0,0 @@
-#/bin/bash
-
-# Starts python scripts
-# =========================
-
-gnome-terminal -e "python ./example.py"
-gnome-terminal -e "python ./example2.py"
-gnome-terminal -e "python ./example3.py"
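The removed scripts above loaded the debug `libhydrabadger.so` through ctypes and called the old `rust_main1`/`rust_main2`/`rust_main3` symbols; this commit renames those exports to `startNode1`/`startNode2`/`startNode3` (see `src/lib.rs` below), so any remaining harness has to target the new names. A hypothetical Rust-side equivalent of one harness, calling the renamed export directly instead of going through the shared library:

```rust
// Hypothetical harness, not part of the commit: link against the hydrabadger
// crate and call the renamed export directly.
extern crate hydrabadger;

fn main() {
    // Blocks: runs a node bound to 127.0.0.1:3000 inside tokio::run.
    hydrabadger::startNode1();
}
```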
@@ -72,7 +72,7 @@ impl Handler {
         match peers.get_by_uid(&tar_uid) {
             Some(p) => p.tx().unbounded_send(msg).unwrap(),
             None => {
-                println!("Node '{}' is not yet established. Queueing message for now (retry_count: {}).",
+                warn!("Node '{}' is not yet established. Queueing message for now (retry_count: {}).",
                     tar_uid, retry_count);
                 self.wire_queue.push((tar_uid, msg, retry_count + 1))
             },

@@ -92,7 +92,7 @@ impl Handler {
             },
             StateDsct::AwaitingMorePeersForKeyGeneration => {
                 if peers.count_validators() >= self.hdb.config().keygen_peer_count {
-                    println!("== BEGINNING KEY GENERATION ==");
+                    warn!("== BEGINNING KEY GENERATION ==");

                     let local_uid = *self.hdb.uid();
                     let local_in_addr = *self.hdb.addr();

@@ -102,7 +102,7 @@ impl Handler {
                         self.hdb.secret_key().clone(), peers, self.hdb.config())?;
                     self.hdb.set_state_discriminant(state.discriminant());

-                    println!("KEY GENERATION: Sending initial parts and our own ack.");
+                    warn!("KEY GENERATION: Sending initial parts and our own ack.");
                     self.wire_to_validators(
                         WireMessage::hello_from_validator(
                             local_uid, local_in_addr, local_sk, state.network_state(&peers)),

@@ -124,7 +124,7 @@ impl Handler {
                 // validator), input the change into HB and broadcast, etc.
                 if request_change_add {
                     let qhb = state.qhb_mut().unwrap();
-                    println!("Change-Adding ('{}') to honey badger.", src_uid);
+                    warn!("Change-Adding ('{}') to honey badger.", src_uid);
                     let step = qhb.input(QhbInput::Change(QhbChange::Add(src_uid, src_pk)))
                         .expect("Error adding new peer to HB");
                     self.step_queue.push(step);

@@ -184,7 +184,7 @@ impl Handler {

     fn handle_ack(&self, uid: &Uid, ack: Ack, sync_key_gen: &mut SyncKeyGen<Uid>,
             ack_count: &mut usize) {
-        println!("KEY GENERATION: Handling ack from '{}'...", uid);
+        warn!("KEY GENERATION: Handling ack from '{}'...", uid);
         let fault_log = sync_key_gen.handle_ack(uid, ack.clone());
         if !fault_log.is_empty() {
             error!("Errors handling ack: '{:?}':\n{:?}", ack, fault_log);

@@ -196,7 +196,7 @@ impl Handler {
     fn handle_queued_acks(&self, ack_queue: &SegQueue<(Uid, Ack)>,
             sync_key_gen: &mut SyncKeyGen<Uid>, part_count: usize, ack_count: &mut usize) {
         if part_count == self.hdb.config().keygen_peer_count + 1 {
-            println!("KEY GENERATION: Handling queued acks...");
+            warn!("KEY GENERATION: Handling queued acks...");

             debug!("  Peers complete: {}", sync_key_gen.count_complete());
             debug!("  Part count: {}", part_count);

@@ -213,7 +213,7 @@ impl Handler {
             State::GeneratingKeys { ref mut sync_key_gen, ref ack_queue, ref mut part_count,
                     ref mut ack_count, .. } => {
                 // TODO: Move this match block into a function somewhere for re-use:
-                println!("KEY GENERATION: Handling part from '{}'...", src_uid);
+                warn!("KEY GENERATION: Handling part from '{}'...", src_uid);
                 let mut skg = sync_key_gen.as_mut().unwrap();
                 let ack = match skg.handle_part(src_uid, part) {
                     Some(PartOutcome::Valid(ack)) => ack,

@@ -228,13 +228,13 @@ impl Handler {

                 *part_count += 1;

-                println!("KEY GENERATION: Queueing `Ack`.");
+                warn!("KEY GENERATION: Queueing `Ack`.");
                 ack_queue.as_ref().unwrap().push((*src_uid, ack.clone()));

                 self.handle_queued_acks(ack_queue.as_ref().unwrap(), skg, *part_count, ack_count);

                 let peers = self.hdb.peers();
-                println!("KEY GENERATION: Part from '{}' acknowledged. Broadcasting ack...", src_uid);
+                warn!("KEY GENERATION: Part from '{}' acknowledged. Broadcasting ack...", src_uid);
                 self.wire_to_validators(WireMessage::key_gen_part_ack(ack), &peers);

                 debug!("  Peers complete: {}", skg.count_complete());

@@ -260,7 +260,7 @@ impl Handler {
                     ref mut ack_count, .. } => {
                 let mut skg = sync_key_gen.as_mut().unwrap();

-                println!("KEY GENERATION: Queueing `Ack`.");
+                warn!("KEY GENERATION: Queueing `Ack`.");
                 ack_queue.as_ref().unwrap().push((*src_uid, ack.clone()));

                 self.handle_queued_acks(ack_queue.as_ref().unwrap(), skg, *part_count, ack_count);

@@ -269,10 +269,10 @@ impl Handler {

                 if skg.count_complete() == node_n
                         && *ack_count >= node_n * node_n {
-                    println!("KEY GENERATION: All acks received and handled.");
-                    println!("  Peers complete: {}", skg.count_complete());
-                    println!("  Part count: {}", part_count);
-                    println!("  Ack count: {}", ack_count);
+                    warn!("KEY GENERATION: All acks received and handled.");
+                    debug!("  Peers complete: {}", skg.count_complete());
+                    debug!("  Part count: {}", part_count);
+                    debug!("  Ack count: {}", ack_count);

                     assert!(skg.is_ready());
                     keygen_is_complete = true;

@@ -299,7 +299,7 @@ impl Handler {
         match state.discriminant() {
             StateDsct::Disconnected => unimplemented!("hydrabadger::Handler::handle_join_plan: `Disconnected`"),
             StateDsct::DeterminingNetworkState => {
-                println!("Received join plan.");
+                warn!("Received join plan.");
                 self.instantiate_hb(Some(jp), state, peers)?;
             },
             StateDsct::AwaitingMorePeersForKeyGeneration | StateDsct::GeneratingKeys => {

@@ -323,7 +323,7 @@ impl Handler {
         match state.discriminant() {
             StateDsct::Disconnected => { unimplemented!() },
             StateDsct::DeterminingNetworkState | StateDsct::GeneratingKeys => {
-                println!("== INSTANTIATING HONEY BADGER ==");
+                warn!("== INSTANTIATING HONEY BADGER ==");
                 match jp_opt {
                     // Some((nni, pk_set, pk_map)) => {
                     //     iom_queue_opt = Some(state.set_observer(*self.hdb.uid(),

@@ -449,7 +449,7 @@ impl Handler {
                 // unimplemented!();
             },
             State::AwaitingMorePeersForKeyGeneration { .. } => {
-                // info!("Removing peer ({}: '{}') from await list.",
+                // warn!("Removing peer ({}: '{}') from await list.",
                 //     src_out_addr, src_uid.clone().unwrap());
                 // state.peer_connection_dropped(&*self.hdb.peers());
             },

@@ -548,7 +548,7 @@ impl Handler {

             InternalMessageKind::PeerDisconnect => {
                 let dropped_src_uid = src_uid.clone().unwrap();
-                println!("Peer disconnected: ({}: '{}').", src_out_addr, dropped_src_uid);
+                warn!("Peer disconnected: ({}: '{}').", src_out_addr, dropped_src_uid);
                 let peers = self.hdb.peers();
                 self.handle_peer_disconnect(dropped_src_uid, state, &peers)?;
             },

@@ -641,7 +641,7 @@ impl Future for Handler {
             },
             Ok(Async::Ready(None)) => {
                 // The sending ends have all dropped.
-                println!("Shutting down Handler...");
+                warn!("Shutting down Handler...");
                 return Ok(Async::Ready(()));
             },
             Ok(Async::NotReady) => {},

@@ -654,10 +654,10 @@ impl Future for Handler {
         // Process outgoing wire queue:
         while let Some((tar_uid, msg, retry_count)) = self.wire_queue.try_pop() {
             if retry_count < WIRE_MESSAGE_RETRY_MAX {
-                println!("Sending queued message from retry queue (retry_count: {})", retry_count);
+                warn!("Sending queued message from retry queue (retry_count: {})", retry_count);
                 self.wire_to(tar_uid, msg, retry_count, &peers);
             } else {
-                println!("Discarding queued message for '{}': {:?}", tar_uid, msg);
+                warn!("Discarding queued message for '{}': {:?}", tar_uid, msg);
             }
         }

@@ -665,10 +665,10 @@ impl Future for Handler {

         // Process all honey badger output batches:
         while let Some(mut step) = self.step_queue.try_pop() {
-            if step.output.len() > 0 { println!("NEW STEP OUTPUT:"); }
+            if step.output.len() > 0 { warn!("NEW STEP OUTPUT:"); }

             for batch in step.output.drain(..) {
-                println!("  BATCH: \n{:?}", batch);
+                warn!("  BATCH: \n{:?}", batch);

                 if cfg!(exit_upon_epoch_1000) && batch.epoch() >= 1000 {
                     return Ok(Async::Ready(()))

@@ -701,7 +701,7 @@ impl Future for Handler {
                 let extra_delay = self.hdb.config().output_extra_delay_ms;

                 if extra_delay > 0 {
-                    println!("Delaying batch processing thread for {}ms", extra_delay);
+                    warn!("Delaying batch processing thread for {}ms", extra_delay);
                     ::std::thread::sleep(::std::time::Duration::from_millis(extra_delay));
                 }
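The hunks above follow one pattern: `println!` calls are replaced with `log` macros (`warn!`, and `debug!` for the noisier counters) so that output goes through whatever logger is installed. This matters on Android because stdout is not captured by logcat, whereas messages sent through the `log` facade are, once `android_logger` is initialized (as `src/lib.rs` does below). A small illustration; the `report` function and its message are invented for the example:

```rust
#[macro_use]
extern crate log;
#[cfg(target_os = "android")]
extern crate android_logger;

fn report(epoch: u64) {
    // Effectively lost on Android: stdout is not shown in `adb logcat`.
    println!("BATCH for epoch {}", epoch);

    // Visible in logcat once android_logger::init_once(..) has run,
    // and on desktop via env_logger (or any other `log` backend).
    warn!("BATCH for epoch {}", epoch);
}
```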
@@ -122,23 +122,17 @@ impl Hydrabadger {
         use env_logger;
         use chrono::Local;

-        env_logger::Builder::new()
-            .format(|buf, record| {
-                write!(buf,
-                    "{} [{}] - HYDRABADGER: {}\n",
-                    Local::now().format("%Y-%m-%dT%H:%M:%S"),
-                    record.level(),
-                    record.args()
-                )
-            })
-            .parse(&env::var("HYDRABADGER_LOG").unwrap_or_default())
-            .try_init().ok();
-
         let uid = Uid::new();
         let secret_key = SecretKey::rand(&mut rand::thread_rng());

         let (peer_internal_tx, peer_internal_rx) = mpsc::unbounded();

+        warn!("");
+        warn!("Local Hydrabadger Node: ");
+        warn!("    UID: {}", uid);
+        warn!("    Socket Address: {}", addr);
+        warn!("    Public Key: {:?}", secret_key.public_key());
+
         warn!("");
         warn!("****** This is an alpha build. Do not use in production! ******");
         warn!("");

@@ -206,7 +200,7 @@ impl Hydrabadger {
     /// Sets the publicly visible state discriminant and returns the previous value.
     pub(super) fn set_state_discriminant(&self, dsct: StateDsct) -> StateDsct {
         let sd = StateDsct::from(self.inner.state_dsct.swap(dsct.into(), Ordering::Release));
-        println!("State has been set from '{}' to '{}'.", sd, dsct);
+        warn!("State has been set from '{}' to '{}'.", sd, dsct);
         sd
     }

@@ -236,7 +230,7 @@ impl Hydrabadger {
     /// Returns a future that handles incoming connections on `socket`.
     fn handle_incoming(self, socket: TcpStream)
             -> impl Future<Item = (), Error = ()> {
-        println!("Incoming connection from '{}'", socket.peer_addr().unwrap());
+        warn!("Incoming connection from '{}'", socket.peer_addr().unwrap());
         let wire_msgs = WireMessages::new(socket);

         wire_msgs.into_future()

@@ -282,7 +276,7 @@ impl Hydrabadger {
             -> impl Future<Item = (), Error = ()> {
         let uid = self.inner.uid.clone();
         let in_addr = self.inner.addr;
-        println!("Initiating outgoing connection to: {}", remote_addr);
+        warn!("Initiating outgoing connection to: {}", remote_addr);

         TcpStream::connect(&remote_addr)
             .map_err(Error::from)

@@ -322,14 +316,14 @@ impl Hydrabadger {
         // Log state:
         let (dsct, p_ttl, p_est) = hdb.state_info_stale();
         let peer_count = peers.count_total();
-        println!("State: {:?}({})", dsct, peer_count);
+        warn!("State: {:?}({})", dsct, peer_count);

         // Log peer list:
         let peer_list = peers.peers().map(|p| {
             p.in_addr().map(|ia| ia.0.to_string())
                 .unwrap_or(format!("No in address"))
         }).collect::<Vec<_>>();
-        println!("    Peers: {:?}", peer_list);
+        warn!("    Peers: {:?}", peer_list);

         // Log (trace) full peerhandler details:
         trace!("PeerHandler list:");

@@ -340,7 +334,7 @@ impl Hydrabadger {

         match dsct {
             StateDsct::Validator => {
-                println!("Generating and inputting {} random transactions...", self.inner.config.txn_gen_count);
+                warn!("Generating and inputting {} random transactions...", self.inner.config.txn_gen_count);
                 // Send some random transactions to our internal HB instance.
                 let txns: Vec<_> = (0..self.inner.config.txn_gen_count).map(|_| {
                     Transaction::random(self.inner.config.txn_gen_bytes)

@@ -362,7 +356,7 @@ impl Hydrabadger {
     pub fn node(self, remotes: Option<HashSet<SocketAddr>>, reactor_remote: Option<()>)
             -> impl Future<Item = (), Error = ()> {
         let socket = TcpListener::bind(&self.inner.addr).unwrap();
-        println!("Listening on: {}", self.inner.addr);
+        warn!("Listening on: {}", self.inner.addr);

         let remotes = remotes.unwrap_or(HashSet::new());

@@ -392,8 +386,8 @@ impl Hydrabadger {
     }

     /// Starts a node.
     pub fn run_node(self, remotes: Option<HashSet<SocketAddr>>) {
         tokio::run(self.node(remotes, None));
     }

     pub fn addr(&self) -> &InAddr {
@@ -133,7 +133,7 @@ impl State {
     // fn set_determining_network_state(&mut self) {
     //     *self = match self {
     //         State::Disconnected { } => {
-    //             info!("Setting state: `DeterminingNetworkState`.");
+    //             warn!("Setting state: `DeterminingNetworkState`.");
     //             State::DeterminingNetworkState { }
     //         },
     //         _ => panic!("Must be disconnected before calling `::peer_connection_added`."),

@@ -144,7 +144,7 @@ impl State {
     pub(super) fn set_awaiting_more_peers(&mut self) {
         *self = match self {
             State::Disconnected { } => {
-                println!("Setting state: `AwaitingMorePeersForKeyGeneration`.");
+                warn!("Setting state: `AwaitingMorePeersForKeyGeneration`.");
                 State::AwaitingMorePeersForKeyGeneration {
                     ack_queue: Some(SegQueue::new()),
                     iom_queue: Some(SegQueue::new()),

@@ -154,14 +154,14 @@ impl State {
                     ref network_state } => {
                 assert!(!network_state.is_some(),
                     "State::set_awaiting_more_peers: Network is active!");
-                println!("Setting state: `AwaitingMorePeersForKeyGeneration`.");
+                warn!("Setting state: `AwaitingMorePeersForKeyGeneration`.");
                 State::AwaitingMorePeersForKeyGeneration {
                     ack_queue: ack_queue.take(),
                     iom_queue: iom_queue.take(),
                 }
             },
             s @ _ => {
-                println!("State::set_awaiting_more_peers: Attempted to set \
+                debug!("State::set_awaiting_more_peers: Attempted to set \
                     `State::AwaitingMorePeersForKeyGeneration` while {}.", s.discriminant());
                 return
             }

@@ -188,7 +188,7 @@ impl State {
                     public_keys.clone(), threshold).map_err(Error::SyncKeyGenNew)?;
                 part = opt_part.expect("This node is not a validator (somehow)!");

-                println!("KEY GENERATION: Handling our own `Part`...");
+                warn!("KEY GENERATION: Handling our own `Part`...");
                 ack = match sync_key_gen.handle_part(&local_uid, part.clone()) {
                     Some(PartOutcome::Valid(ack)) => ack,
                     Some(PartOutcome::Invalid(faults)) => panic!("Invalid part \

@@ -196,13 +196,13 @@ impl State {
                     None => unimplemented!(),
                 };

-                // info!("KEY GENERATION: Handling our own `Ack`...");
+                // warn!("KEY GENERATION: Handling our own `Ack`...");
                 // let fault_log = sync_key_gen.handle_ack(local_uid, ack.clone());
                 // if !fault_log.is_empty() {
                 //     error!("Errors acknowledging part (from self):\n {:?}", fault_log);
                 // }

-                println!("KEY GENERATION: Queueing our own `Ack`...");
+                warn!("KEY GENERATION: Queueing our own `Ack`...");
                 ack_queue.as_ref().unwrap().push((*local_uid, ack.clone()));

                 State::GeneratingKeys {

@@ -243,20 +243,20 @@ impl State {

                 iom_queue_ret = iom_queue.take().unwrap();

-                println!("");
-                println!("== HONEY BADGER INITIALIZED ==");
-                println!("");
+                warn!("");
+                warn!("== HONEY BADGER INITIALIZED ==");
+                warn!("");

                 { // TODO: Consolidate or remove:
                     let pk_set = qhb.dyn_hb().netinfo().public_key_set();
                     let pk_map = qhb.dyn_hb().netinfo().public_key_map();
-                    println!("");
-                    println!("");
-                    println!("PUBLIC KEY: {:?}", pk_set.public_key());
-                    println!("PUBLIC KEY SET: \n{:?}", pk_set);
-                    println!("PUBLIC KEY MAP: \n{:?}", pk_map);
-                    println!("");
-                    println!("");
+                    warn!("");
+                    warn!("");
+                    warn!("PUBLIC KEY: {:?}", pk_set.public_key());
+                    warn!("PUBLIC KEY SET: \n{:?}", pk_set);
+                    warn!("PUBLIC KEY MAP: \n{:?}", pk_map);
+                    warn!("");
+                    warn!("");
                 }

                 State::Observer { qhb: Some(qhb) }

@@ -308,20 +308,20 @@ impl State {
                     .build();
                 step_queue.push(qhb_step);

-                println!("");
-                println!("== HONEY BADGER INITIALIZED ==");
-                println!("");
+                warn!("");
+                warn!("== HONEY BADGER INITIALIZED ==");
+                warn!("");

                 { // TODO: Consolidate or remove:
                     let pk_set = qhb.dyn_hb().netinfo().public_key_set();
                     let pk_map = qhb.dyn_hb().netinfo().public_key_map();
-                    println!("");
-                    println!("");
-                    println!("PUBLIC KEY: {:?}", pk_set.public_key());
-                    println!("PUBLIC KEY SET: \n{:?}", pk_set);
-                    println!("PUBLIC KEY MAP: \n{:?}", pk_map);
-                    println!("");
-                    println!("");
+                    warn!("");
+                    warn!("");
+                    warn!("PUBLIC KEY: {:?}", pk_set.public_key());
+                    warn!("PUBLIC KEY SET: \n{:?}", pk_set);
+                    warn!("PUBLIC KEY MAP: \n{:?}", pk_map);
+                    warn!("");
+                    warn!("");
                 }

@@ -338,7 +338,7 @@ impl State {
     pub(super) fn promote_to_validator(&mut self) -> Result<(), Error> {
         *self = match self {
             State::Observer { ref mut qhb } => {
-                println!("=== PROMOTING NODE TO VALIDATOR ===");
+                warn!("=== PROMOTING NODE TO VALIDATOR ===");
                 State::Validator { qhb: qhb.take() }
             },
             s @ _ => panic!("State::promote_to_validator: State must be `Observer`. State: {}",

@@ -353,7 +353,7 @@ impl State {
         let _dsct = self.discriminant();
         *self = match self {
             State::Disconnected { } => {
-                println!("Setting state: `DeterminingNetworkState`.");
+                warn!("Setting state: `DeterminingNetworkState`.");
                 State::DeterminingNetworkState {
                     ack_queue: Some(SegQueue::new()),
                     iom_queue: Some(SegQueue::new()),

@@ -380,7 +380,7 @@ impl State {
                 return;
             },
             State::AwaitingMorePeersForKeyGeneration { .. } => {
-                println!("Ignoring peer disconnection when \
+                debug!("Ignoring peer disconnection when \
                     `State::AwaitingMorePeersForKeyGeneration`.");
                 return;
             },

@@ -388,11 +388,11 @@ impl State {
                 panic!("FIXME: RESTART KEY GENERATION PROCESS AFTER PEER DISCONNECTS.");
             }
             State::Observer { qhb: _, .. } => {
-                println!("Ignoring peer disconnection when `State::Observer`.");
+                debug!("Ignoring peer disconnection when `State::Observer`.");
                 return;
             },
             State::Validator { qhb: _, .. } => {
-                println!("Ignoring peer disconnection when `State::Validator`.");
+                debug!("Ignoring peer disconnection when `State::Validator`.");
                 return;
             },
         }
src/lib.rs (51 lines changed)

@@ -3,10 +3,14 @@
 #[cfg(feature = "nightly")]
 extern crate alloc_system;
 extern crate clap;
+#[macro_use] extern crate log;
+extern crate android_logger;
+
+use android_logger::Filter;
+use log::Level;
+
 extern crate env_logger;
 #[macro_use]
-extern crate log;
-#[macro_use]
 extern crate failure;
 extern crate crossbeam;
 // #[macro_use] extern crate crossbeam_channel;

@@ -440,8 +444,13 @@ use std::collections::HashSet;
 use std::net::Ipv4Addr;
 use std::net::IpAddr;

+fn native_activity_create() {
+    android_logger::init_once(Filter::default().with_min_level(Level::Trace), None);
+}
+
 #[no_mangle]
-pub extern fn rust_main1() {
+pub extern fn startNode1() {
+    warn!("enter to startNode1");
     let bind_address: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3000);

     let mut remote_addresses: HashSet<SocketAddr> = HashSet::new();

@@ -451,11 +460,14 @@ pub extern fn rust_main1() {
     let cfg = Config::default();

     let hb = Hydrabadger::new(bind_address, cfg);
+    warn!("Hydrabadger::new");
     hb.run_node(Some(remote_addresses));
+    warn!("startNode1");
 }

 #[no_mangle]
-pub extern fn rust_main2() {
+pub extern fn startNode2() {
+    warn!("enter to startNode2");
     let bind_address: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3001);

     let mut remote_addresses: HashSet<SocketAddr> = HashSet::new();

@@ -466,10 +478,12 @@ pub extern fn rust_main2() {

     let hb = Hydrabadger::new(bind_address, cfg);
     hb.run_node(Some(remote_addresses));
+    warn!("startNode2");
 }

 #[no_mangle]
-pub extern fn rust_main3() {
+pub extern fn startNode3() {
+    warn!("enter to startNode3");
     let bind_address: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3002);

     let mut remote_addresses: HashSet<SocketAddr> = HashSet::new();

@@ -480,6 +494,14 @@ pub extern fn rust_main3() {

     let hb = Hydrabadger::new(bind_address, cfg);
     hb.run_node(Some(remote_addresses));
+    warn!("startNode3");
+}
+
+#[no_mangle]
+pub extern fn testLog() {
+    native_activity_create();
+
+    warn!("I am Started");
 }

 /// Expose the JNI interface for android below

@@ -496,22 +518,29 @@ pub mod android {
     #[no_mangle]
     pub unsafe extern fn Java_ru_hintsolution_hbbft_hbbft_MainActivity_startNode1(_env: JNIEnv, _: JClass) -> jboolean {
         // Our Java companion code might pass-in "world" as a string, hence the name.
-        rust_main1();
+        startNode1();
         1
     }

     #[no_mangle]
     pub unsafe extern fn Java_ru_hintsolution_hbbft_hbbft_MainActivity_startNode2(_env: JNIEnv, _: JClass) -> jboolean {
         // Our Java companion code might pass-in "world" as a string, hence the name.
-        rust_main2();
+        startNode2();
         1
     }

     #[no_mangle]
     pub unsafe extern fn Java_ru_hintsolution_hbbft_hbbft_MainActivity_startNode3(_env: JNIEnv, _: JClass) -> jboolean {
         // Our Java companion code might pass-in "world" as a string, hence the name.
-        rust_main3();
+        startNode3();
         1
+    }
+
+    #[no_mangle]
+    pub unsafe extern fn Java_ru_hintsolution_hbbft_hbbft_MainActivity_testLog(_env: JNIEnv, _: JClass) -> jboolean {
+        // Our Java companion code might pass-in "world" as a string, hence the name.
+        testLog();
+        1
     }
 }
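One detail in the `src/lib.rs` changes above: `native_activity_create()`, which installs `android_logger`, is only invoked from `testLog()`, so the `warn!` calls inside `startNode1`..`startNode3` produce no logcat output unless `testLog` has run first. A hypothetical variant (an assumption, not part of the commit) that initializes the logger at the top of each entry point:

```rust
#[no_mangle]
pub extern fn startNode1() {
    // Hypothetical: install the Android logger before the first log call.
    // `init_once` is designed to be called repeatedly without re-initializing,
    // so doing this in every entry point should be harmless.
    #[cfg(target_os = "android")]
    native_activity_create();

    warn!("enter to startNode1");
    // ... node setup as in src/lib.rs ...
}
```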
@@ -150,7 +150,7 @@ impl Future for PeerHandler {
             } else {
                 // EOF was reached. The remote client has disconnected. There is
                 // nothing more to do.
-                println!("Peer ({}: '{}') disconnected.", self.out_addr, self.uid.clone().unwrap());
+                warn!("Peer ({}: '{}') disconnected.", self.out_addr, self.uid.clone().unwrap());
                 return Ok(Async::Ready(()));
             }
         }