Merge pull request #14 from ebfull/affine-api

Add "normalize" method to Group and improve performance of serialization
ebfull 2016-10-14 13:36:42 -06:00 committed by GitHub
commit e6ebe3f6d4
6 changed files with 140 additions and 226 deletions
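The new `Group::normalize` method brings a point's internal Jacobian representation into the z = 1 form (judging by the `to_affine`/`to_jacobian` round trip further down), which is what the serialization speed-up exploits. Below is a minimal sketch of how a caller might use it, assuming the same `bincode`/`rustc-serialize` setup the crate's own benchmarks use; the `main` wrapper is illustrative only.

```rust
extern crate bn;
extern crate bincode;
extern crate rand;

use bn::*;
use bincode::SizeLimit::Infinite;
use bincode::rustc_serialize::encode;

fn main() {
    let rng = &mut rand::thread_rng();

    // A random point is in Jacobian coordinates and need not have z = 1.
    let mut p = G1::random(rng);

    // Normalize once; subsequent encodings of `p` can then take the
    // cheaper affine fast path added in this pull request.
    p.normalize();

    let bytes = encode(&p, Infinite).unwrap();
    println!("encoded {} bytes", bytes.len());
}
```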

View File

@@ -1,6 +1,6 @@
 [package]
 name = "bn"
-version = "0.4.0"
+version = "0.4.1"
 authors = ["Sean Bowe <ewillbefull@gmail.com>"]
 description = "Pairing cryptography with the Barreto-Naehrig curve"
 keywords = ["pairing","crypto","cryptography"]

View File

@@ -14,7 +14,7 @@ Add the `bn` crate to your dependencies in `Cargo.toml`...
 ```toml
 [dependencies]
-bn = "0.4.0"
+bn = "0.4.1"
 ```
 
 ...and add an `extern crate` declaration to your crate root:

View File

@@ -8,272 +8,153 @@ use bn::*;
 use bincode::SizeLimit::Infinite;
 use bincode::rustc_serialize::{encode, decode};
 
-#[bench]
-fn g1_deserialization(b: &mut test::Bencher) {
-    const SAMPLES: usize = 1000;
-
-    let rng = &mut rand::thread_rng();
-
-    let serialized: Vec<_> = (0..SAMPLES).map(|_| encode(&G1::random(rng), Infinite).unwrap()).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        decode::<G1>(&serialized[ctr % SAMPLES]).unwrap()
-    });
-}
-
-#[bench]
-fn g2_deserialization(b: &mut test::Bencher) {
-    const SAMPLES: usize = 1000;
-
-    let rng = &mut rand::thread_rng();
-
-    let serialized: Vec<_> = (0..SAMPLES).map(|_| encode(&G2::random(rng), Infinite).unwrap()).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        decode::<G2>(&serialized[ctr % SAMPLES]).unwrap()
-    });
-}
-
-#[bench]
-fn fr_addition(b: &mut test::Bencher) {
-    const SAMPLES: usize = 1000;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] + v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn fr_subtraction(b: &mut test::Bencher) {
-    const SAMPLES: usize = 1000;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] - v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn fr_multiplication(b: &mut test::Bencher) {
-    const SAMPLES: usize = 1000;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] * v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn fr_inverses(b: &mut test::Bencher) {
-    const SAMPLES: usize = 1000;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES].inverse()
-    });
-}
-
-#[bench]
-fn g1_addition(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] + v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn g1_subtraction(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] - v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn g1_scalar_multiplication(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] * v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn g2_addition(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] + v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn g2_subtraction(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] - v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn g2_scalar_multiplication(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v1[ctr % SAMPLES] * v2[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn fq12_scalar_multiplication(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-    let v3: Vec<_> = (0..SAMPLES).map(|i| pairing(v1[i], v2[i])).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v3[(ctr + SAMPLES/50) % SAMPLES] * v3[ctr % SAMPLES]
-    });
-}
-
-#[bench]
-fn fq12_exponentiation(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-    let v3: Vec<_> = (0..SAMPLES).map(|i| pairing(v1[i], v2[i])).collect();
-    let v4: Vec<_> = (0..SAMPLES).map(|_| Fr::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        v3[ctr % SAMPLES].pow(v4[ctr % SAMPLES])
-    });
-}
-
-#[bench]
-fn perform_pairing(b: &mut test::Bencher) {
-    const SAMPLES: usize = 100;
-
-    let rng = &mut rand::thread_rng();
-
-    let v1: Vec<_> = (0..SAMPLES).map(|_| G1::random(rng)).collect();
-    let v2: Vec<_> = (0..SAMPLES).map(|_| G2::random(rng)).collect();
-
-    let mut ctr = 0;
-
-    b.iter(|| {
-        ctr += 1;
-        pairing(v1[ctr % SAMPLES], v2[ctr % SAMPLES])
-    });
-}
+const SAMPLES: usize = 30;
+
+macro_rules! benchmark(
+    ($name:ident, $input:ident($rng:ident) = $pre:expr; $post:expr) => (
+        #[bench]
+        fn $name(b: &mut test::Bencher) {
+            let $rng = &mut rand::thread_rng();
+            let $input: Vec<_> = (0..SAMPLES).map(|_| $pre).collect();
+
+            b.bench_n(SAMPLES as u64, |b| {
+                let mut c = 0;
+
+                b.iter(|| {
+                    c += 1;
+
+                    let $input = &$input[c % SAMPLES];
+
+                    $post
+                })
+            })
+        }
+    )
+);
+
+benchmark!(g1_serialization,
+    input(rng) = G1::random(rng);
+
+    encode(input, Infinite).unwrap()
+);
+
+benchmark!(g1_serialization_normalized,
+    input(rng) = {let mut tmp = G1::random(rng); tmp.normalize(); tmp};
+
+    encode(input, Infinite).unwrap()
+);
+
+benchmark!(g2_serialization,
+    input(rng) = G2::random(rng);
+
+    encode(input, Infinite).unwrap()
+);
+
+benchmark!(g2_serialization_normalized,
+    input(rng) = {let mut tmp = G2::random(rng); tmp.normalize(); tmp};
+
+    encode(input, Infinite).unwrap()
+);
+
+benchmark!(g1_deserialization,
+    input(rng) = {encode(&G1::random(rng), Infinite).unwrap()};
+
+    decode::<G1>(input).unwrap()
+);
+
+benchmark!(g2_deserialization,
+    input(rng) = {encode(&G2::random(rng), Infinite).unwrap()};
+
+    decode::<G2>(input).unwrap()
+);
+
+benchmark!(fr_addition,
+    input(rng) = (Fr::random(rng), Fr::random(rng));
+
+    input.0 + input.1
+);
+
+benchmark!(fr_subtraction,
+    input(rng) = (Fr::random(rng), Fr::random(rng));
+
+    input.0 - input.1
+);
+
+benchmark!(fr_multiplication,
+    input(rng) = (Fr::random(rng), Fr::random(rng));
+
+    input.0 * input.1
+);
+
+benchmark!(fr_inverses,
+    input(rng) = Fr::random(rng);
+
+    input.inverse()
+);
+
+benchmark!(g1_addition,
+    input(rng) = (G1::random(rng), G1::random(rng));
+
+    input.0 + input.1
+);
+
+benchmark!(g1_subtraction,
+    input(rng) = (G1::random(rng), G1::random(rng));
+
+    input.0 - input.1
+);
+
+benchmark!(g1_scalar_multiplication,
+    input(rng) = (G1::random(rng), Fr::random(rng));
+
+    input.0 * input.1
+);
+
+benchmark!(g2_addition,
+    input(rng) = (G2::random(rng), G2::random(rng));
+
+    input.0 + input.1
+);
+
+benchmark!(g2_subtraction,
+    input(rng) = (G2::random(rng), G2::random(rng));
+
+    input.0 - input.1
+);
+
+benchmark!(g2_scalar_multiplication,
+    input(rng) = (G2::random(rng), Fr::random(rng));
+
+    input.0 * input.1
+);
+
+benchmark!(fq12_scalar_multiplication,
+    input(rng) = {
+        let g1_1 = G1::random(rng);
+        let g2_1 = G2::random(rng);
+        let g1_2 = G1::random(rng);
+        let g2_2 = G2::random(rng);
+
+        (pairing(g1_1, g2_1), pairing(g1_2, g2_2))
+    };
+
+    input.0 * input.1
+);
+
+benchmark!(fq12_exponentiation,
+    input(rng) = ({
+        let g1 = G1::random(rng);
+        let g2 = G2::random(rng);
+
+        pairing(g1, g2)
+    }, Fr::random(rng));
+
+    input.0.pow(input.1)
+);
+
+benchmark!(perform_pairing,
+    input(rng) = (G1::random(rng), G2::random(rng));
+
+    pairing(input.0, input.1)
+);

View File

@@ -112,6 +112,11 @@ impl<P: GroupParams> G<P> {
     pub fn to_affine(&self) -> Option<AffineG<P>> {
         if self.z.is_zero() {
             None
+        } else if self.z == P::Base::one() {
+            Some(AffineG {
+                x: self.x,
+                y: self.y
+            })
         } else {
             let zinv = self.z.inverse().unwrap();
             let zinv_squared = zinv.squared();
@@ -125,7 +130,7 @@ impl<P: GroupParams> G<P> {
 }
 
 impl<P: GroupParams> AffineG<P> {
-    fn to_jacobian(&self) -> G<P> {
+    pub fn to_jacobian(&self) -> G<P> {
         G {
             x: self.x,
             y: self.y,
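The new `z == one()` branch is the fast path that `normalize` is intended to hit: a point already in z = 1 form skips the field inversion when converted to affine coordinates. As an illustration only (the `encode_points` helper below is hypothetical, not part of the crate), a caller serializing many points could normalize them first; this is the scenario the `g1_serialization_normalized` benchmark above measures.

```rust
extern crate bn;
extern crate bincode;

use bn::*;
use bincode::SizeLimit::Infinite;
use bincode::rustc_serialize::encode;

// Hypothetical helper: normalize each point once so that encoding it
// can take the z == 1 fast path instead of paying for an inversion.
fn encode_points(points: &mut [G1]) -> Vec<Vec<u8>> {
    for p in points.iter_mut() {
        p.normalize();
    }

    points.iter()
          .map(|p| encode(p, Infinite).unwrap())
          .collect()
}
```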

View File

@@ -73,6 +73,7 @@ pub trait Group:
     fn one() -> Self;
     fn random<R: Rng>(rng: &mut R) -> Self;
     fn is_zero(&self) -> bool;
+    fn normalize(&mut self);
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, RustcDecodable, RustcEncodable)]
@@ -84,6 +85,14 @@ impl Group for G1 {
     fn one() -> Self { G1(groups::G1::one()) }
     fn random<R: Rng>(rng: &mut R) -> Self { G1(groups::G1::random(rng)) }
     fn is_zero(&self) -> bool { self.0.is_zero() }
+    fn normalize(&mut self) {
+        let new = match self.0.to_affine() {
+            Some(a) => a,
+            None => return
+        };
+
+        self.0 = new.to_jacobian();
+    }
 }
 
 impl Add<G1> for G1 {
@@ -119,6 +128,14 @@ impl Group for G2 {
     fn one() -> Self { G2(groups::G2::one()) }
     fn random<R: Rng>(rng: &mut R) -> Self { G2(groups::G2::random(rng)) }
     fn is_zero(&self) -> bool { self.0.is_zero() }
+    fn normalize(&mut self) {
+        let new = match self.0.to_affine() {
+            Some(a) => a,
+            None => return
+        };
+
+        self.0 = new.to_jacobian();
+    }
 }
 
 impl Add<G2> for G2 {
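Because `normalize` is declared on the `Group` trait rather than on a single type, callers can write normalization code once for any group. A small sketch, assuming only the public API shown in this diff (`normalize_all` and the `main` wrapper are illustrative names):

```rust
extern crate bn;
extern crate rand;

use bn::{Group, G1, G2};

// Works for G1, G2, or any other Group implementation, because
// `normalize` is part of the trait itself.
fn normalize_all<G: Group>(points: &mut [G]) {
    for p in points.iter_mut() {
        p.normalize();
    }
}

fn main() {
    let rng = &mut rand::thread_rng();

    let mut g1s: Vec<G1> = (0..4).map(|_| G1::random(rng)).collect();
    let mut g2s: Vec<G2> = (0..4).map(|_| G2::random(rng)).collect();

    normalize_all(&mut g1s);
    normalize_all(&mut g2s);
}
```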

View File

@@ -35,6 +35,11 @@ fn group_serialization_and_deserialization() {
         a = a * b;
 
         assert!(reserialize(a) == a);
+        assert!(reserialize(reserialize(a)) == a);
+
+        let mut c = a;
+        c.normalize();
+        assert!(a == c);
     }
 
     let mut a = G2::one();
@@ -42,6 +47,12 @@ fn group_serialization_and_deserialization() {
         a = a * b;
 
         assert!(reserialize(a) == a);
+        assert!(reserialize(reserialize(a)) == a);
+
+        let mut c = a;
+        c.normalize();
+        assert!(a == c);
     }
 }