w3f_bls/verifiers.rs
//! ## Algorithms for optimized verification of aggregate and batched BLS signatures.

use core::borrow::Borrow;
#[cfg(feature = "std")]
use std::collections::HashMap;

#[cfg(feature = "std")]
use ark_ec::AffineRepr;
#[cfg(feature = "std")]
use ark_ff::field_hashers::{DefaultFieldHasher, HashToField};
#[cfg(feature = "std")]
use ark_serialize::CanonicalSerialize;
#[cfg(feature = "std")]
use digest::DynDigest;

use ark_ec::CurveGroup;

use alloc::vec::Vec;

use super::*;

// We define these convenience type aliases here instead of engine.rs
// because seemingly only verifier implementations really employ them.
// And we `pub use engine::*` in lib.rs.

/// Convenience type alias for projective form of `PublicKeyGroup`
pub type PublicKeyProjective<E> = <E as EngineBLS>::PublicKeyGroup;

/// Convenience type alias for affine form of `PublicKeyGroup`
pub type PublicKeyAffine<E> = <<E as EngineBLS>::PublicKeyGroup as CurveGroup>::Affine;

/// Convenience type alias for projective form of `SignatureGroup`
pub type SignatureProjective<E> = <E as EngineBLS>::SignatureGroup;

/// Convenience type alias for affine form of `SignatureGroup`
pub type SignatureAffine<E> = <<E as EngineBLS>::SignatureGroup as CurveGroup>::Affine;

/// Simple unoptimized BLS signature verification. Useful for testing.
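///
/// A hedged usage sketch; `Keypair`, `Message`, `ZBLS`, and
/// `Keypair::signed_message` (returning a `SignedMessage`, which
/// implements `Signed`) are assumed from this crate's public API:
///
/// ```ignore
/// use w3f_bls::{Keypair, Message, ZBLS};
///
/// let mut keypair = Keypair::<ZBLS>::generate(::rand::thread_rng());
/// let message = Message::new(b"some context", b"some message");
/// let signed = keypair.signed_message(&message);
/// assert!(w3f_bls::verify_unoptimized(signed));
/// ```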
pub fn verify_unoptimized<S: Signed>(s: S) -> bool {
    let signature = S::E::prepare_signature(s.signature().0);
    let prepared = s
        .messages_and_publickeys()
        .map(|(message, public_key)| {
            (
                S::E::prepare_public_key(public_key.borrow().0),
                S::E::prepare_signature(message.borrow().hash_to_signature_curve::<S::E>()),
            )
        })
        .collect::<Vec<(_, _)>>();
    S::E::verify_prepared(signature, prepared.iter())
}

/// Simple universal BLS signature verification
///
/// We support an unstable `Signed::messages_and_publickeys()`
/// securely by calling it only once and batch normalizing all
/// points, as do most other verification routines here.
/// We do no optimizations that reduce the number of pairings
/// by combining repeated messages or signers.
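///
/// Concretely, with `g` the generator of the public key group, the
/// final pairing check amounts to
/// `e(g, signature) = prod_i e(pk_i, H(m_i))`,
/// costing one pairing per (public key, message) pair plus one for
/// the signature.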
pub fn verify_simple<S: Signed>(s: S) -> bool {
    let signature = s.signature().0;
    // We could write this more idiomatically using iterator adaptors,
    // avoiding an unnecessary allocation for publickeys, but only
    // by calling self.messages_and_publickeys() repeatedly.
    let itr = s.messages_and_publickeys();
    let l = {
        let (lower, upper) = itr.size_hint();
        upper.unwrap_or(lower)
    };
    let mut gpk = Vec::with_capacity(l);
    let mut gms = Vec::with_capacity(l + 1);
    for (message, publickey) in itr {
        gpk.push(publickey.borrow().0.clone());
        gms.push(message.borrow().hash_to_signature_curve::<S::E>());
    }
    let gpk = <<S as Signed>::E as EngineBLS>::PublicKeyGroup::normalize_batch(gpk.as_mut_slice());
    gms.push(signature);
    let mut gms =
        <<S as Signed>::E as EngineBLS>::SignatureGroup::normalize_batch(gms.as_mut_slice());
    let signature = <<S as Signed>::E as EngineBLS>::prepare_signature(gms.pop().unwrap());
    let prepared = gpk
        .iter()
        .zip(gms)
        .map(|(pk, m)| {
            (
                <<S as Signed>::E as EngineBLS>::prepare_public_key(*pk),
                <<S as Signed>::E as EngineBLS>::prepare_signature(m),
            )
        })
        .collect::<Vec<(_, _)>>();
    S::E::verify_prepared(signature, prepared.iter())
}

/// BLS signature verification optimized for all unique messages
///
/// Assuming all messages are distinct, the minimum number of pairings
/// is the number of unique signers, which we achieve here.
/// We do not verify message uniqueness here, but leave this to the
/// aggregate signature type, like `DistinctMessages`.
///
/// We merge any messages with identical signers and batch normalize
/// message points and the signature itself.
/// We optionally batch normalize the public keys in the event that
/// they are provided by algebraic operations, but this sounds
/// unlikely given our requirement that messages be distinct.
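///
/// Merging is sound because pairings are bilinear:
/// `e(pk, H(m1)) * e(pk, H(m2)) = e(pk, H(m1) + H(m2))`,
/// so all message points sharing a signer may be summed before pairing.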
#[cfg(feature = "std")]
pub fn verify_with_distinct_messages<S: Signed>(signed: S, normalize_public_keys: bool) -> bool {
    let signature = signed.signature().0;
    // We first hash the messages to the signature curve and
    // normalize the public keys to operate on them as bytes.
    // TODO: Assess if we should mutate in place using interior
    // mutability, maybe using `BorrowMut` support in
    // `batch_normalization`.
    let itr = signed.messages_and_publickeys();
    let l = {
        let (lower, upper) = itr.size_hint();
        upper.unwrap_or(lower)
    };
    let mut publickeys = Vec::with_capacity(l);
    let mut messages = Vec::with_capacity(l + 1);
    for (m, pk) in itr {
        publickeys.push(pk.borrow().0.clone());
        messages.push(m.borrow().hash_to_signature_curve::<S::E>());
    }
    let mut affine_publickeys = if normalize_public_keys {
        <<S as Signed>::E as EngineBLS>::PublicKeyGroup::normalize_batch(&publickeys)
    } else {
        publickeys
            .iter()
            .map(|pk| pk.into_affine())
            .collect::<Vec<_>>()
    };

    // We next accumulate message points with the same signer.
    // We could avoid the allocation here if we sorted both
    // arrays in parallel. This might mean (a) some sort function
    // using `ops::IndexMut` instead of slices, and (b) wrapper
    // types to make tuples of slices satisfy `ops::IndexMut`.
    // TODO: Impl PartialEq, Eq, Hash for pairing::EncodedPoint
    // to avoid struct H(E::PublicKeyGroup::Affine::Uncompressed);
    type AA<E> = (PublicKeyAffine<E>, SignatureProjective<E>);
    let mut pks_n_ms = HashMap::with_capacity(l);
    for (pk, m) in affine_publickeys.drain(..).zip(messages.drain(..)) {
        let mut pk_uncompressed = vec![0; pk.uncompressed_size()];
        pk.serialize_uncompressed(&mut pk_uncompressed[..]).unwrap();
        pks_n_ms
            .entry(pk_uncompressed)
            .and_modify(|(_pk0, m0): &mut AA<S::E>| {
                *m0 += m;
            })
            .or_insert((pk, m));
    }

    let mut publickeys = Vec::with_capacity(l);
    for (_, (pk, m)) in pks_n_ms.drain() {
        messages.push(m);
        publickeys.push(pk.clone());
    }

    // We finally normalize the messages and the signature.

    messages.push(signature);
    let mut affine_messages =
        <<S as Signed>::E as EngineBLS>::SignatureGroup::normalize_batch(messages.as_mut_slice());
    let signature =
        <<S as Signed>::E as EngineBLS>::prepare_signature(affine_messages.pop().unwrap());
    // TODO: Assess if we could cache normalized message hashes anyplace
    // using interior mutability, but probably this does not work well
    // with our optimization of collecting messages with the same signer.

    // And verify the aggregate signature.
    let prepared = publickeys
        .iter()
        .zip(messages)
        .map(|(pk, m)| {
            (
                <<S as Signed>::E as EngineBLS>::prepare_public_key(*pk),
                <<S as Signed>::E as EngineBLS>::prepare_signature(m),
            )
        })
        .collect::<Vec<(_, _)>>();

    S::E::verify_prepared(signature, prepared.iter())

    //let prepared = publickeys.iter().zip(&messages);
    //S::E::verify_prepared( &signature, prepared )
}

/// BLS signature verification optimized for all unique messages
///
/// Assuming all messages are distinct, the minimum number of pairings
/// is the number of unique signers, which we achieve here.
/// We do not verify message uniqueness here, but leave this to the
/// aggregate signature type, like `DistinctMessages`.
///
/// We merge any messages with identical signers and batch normalize
/// message points and the signature itself.
/// We optionally batch normalize the public keys in the event that
/// they are provided by algebraic operations, but this sounds
/// unlikely given our requirement that messages be distinct.
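///
/// In addition, this variant checks an aggregated auxiliary public key
/// living in the signature group: we derive a deterministic pseudo-random
/// scalar `r` by hashing the first message point, the first public key,
/// the auxiliary key, and the signature, then verify `signature + r * aux`
/// against the message points `H(m_i) + r * g`, where `g` generates the
/// signature group. By bilinearity the equation balances only if `aux`
/// aggregates the signers' keys in the signature group.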
#[cfg(feature = "std")]
pub fn verify_using_aggregated_auxiliary_public_keys<
    E: EngineBLS,
    H: DynDigest + Default + Clone,
>(
    signed: &single_pop_aggregator::SignatureAggregatorAssumingPoP<E>,
    normalize_public_keys: bool,
    aggregated_aux_pub_key: <E as EngineBLS>::SignatureGroup,
) -> bool {
    let signature = Signed::signature(&signed).0;

    let mut signature_as_bytes = vec![0; signature.compressed_size()];
    signature
        .serialize_compressed(&mut signature_as_bytes[..])
        .expect("compressed size has been allocated");

    let itr = signed.messages_and_publickeys();
    let l = {
        let (lower, upper) = itr.size_hint();
        upper.unwrap_or(lower)
    };
    let (first_message, first_public_key) = match signed.messages_and_publickeys().next() {
        Some((first_message, first_public_key)) => (first_message, first_public_key),
        None => return false,
    };

    let mut first_public_key_as_bytes = vec![0; first_public_key.compressed_size()];
    first_public_key
        .serialize_compressed(&mut first_public_key_as_bytes[..])
        .expect("compressed size has been allocated");

    let first_message_point = first_message.hash_to_signature_curve::<E>();
    let first_message_point_as_bytes = E::signature_point_to_byte(&first_message_point);

    let mut aggregated_aux_pub_key_as_bytes = vec![0; aggregated_aux_pub_key.compressed_size()];
    aggregated_aux_pub_key
        .serialize_compressed(&mut aggregated_aux_pub_key_as_bytes[..])
        .expect("compressed size has been allocated");

    // We first hash the messages to the signature curve and
    // normalize the public keys to operate on them as bytes.
    // TODO: Assess if we should mutate in place using interior
    // mutability, maybe using `BorrowMut` support in
    // `batch_normalization`.

    // Deterministic randomness for adding the aggregated auxiliary public key.
    // TODO: We cannot just assume there is exactly one public key here;
    // we should bail out, or else aggregate, if there are more.

    let pseudo_random_scalar_seed = [
        first_message_point_as_bytes,
        first_public_key_as_bytes,
        aggregated_aux_pub_key_as_bytes,
        signature_as_bytes,
    ]
    .concat();

    let hasher = <DefaultFieldHasher<H> as HashToField<E::Scalar>>::new(&[]);
    let pseudo_random_scalar: E::Scalar =
        hasher.hash_to_field(&pseudo_random_scalar_seed[..], 1)[0];

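    // Fold the auxiliary key into the signature; each message point below is
    // shifted by `r * g`, so by bilinearity the pairing product balances only
    // if `aggregated_aux_pub_key` aggregates the signers' keys in the
    // signature group.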
    let signature = signature + aggregated_aux_pub_key * pseudo_random_scalar;

    let mut publickeys = Vec::with_capacity(l);
    let mut messages = Vec::with_capacity(l + 1);

    // Simplify from here on.
    for (m, pk) in itr {
        publickeys.push(pk.0.clone());
        messages.push(
            m.hash_to_signature_curve::<E>()
                + E::SignatureGroupAffine::generator() * pseudo_random_scalar,
        );
    }

    let mut affine_publickeys = if normalize_public_keys {
        <E as EngineBLS>::PublicKeyGroup::normalize_batch(&publickeys)
    } else {
        publickeys
            .iter()
            .map(|pk| pk.into_affine())
            .collect::<Vec<_>>()
    };

    // We next accumulate message points with the same signer.
    // We could avoid the allocation here if we sorted both
    // arrays in parallel. This might mean (a) some sort function
    // using `ops::IndexMut` instead of slices, and (b) wrapper
    // types to make tuples of slices satisfy `ops::IndexMut`.
    // TODO: Impl PartialEq, Eq, Hash for pairing::EncodedPoint
    // to avoid struct H(E::PublicKeyGroup::Affine::Uncompressed);
    type AA<E> = (PublicKeyAffine<E>, SignatureProjective<E>);
    let mut pks_n_ms = HashMap::with_capacity(l);
    for (pk, m) in affine_publickeys.drain(..).zip(messages.drain(..)) {
        let mut pk_uncompressed = vec![0; pk.uncompressed_size()];
        pk.serialize_uncompressed(&mut pk_uncompressed[..]).unwrap();
        pks_n_ms
            .entry(pk_uncompressed)
            .and_modify(|(_pk0, m0): &mut AA<E>| {
                *m0 += m;
            })
            .or_insert((pk, m));
    }

    let mut publickeys = Vec::with_capacity(l);
    for (_, (pk, m)) in pks_n_ms.drain() {
        messages.push(m);
        publickeys.push(pk.clone());
    }

    // We finally normalize the messages and the signature.

    messages.push(signature);
    let mut affine_messages =
        <E as EngineBLS>::SignatureGroup::normalize_batch(messages.as_mut_slice());
    let signature = <E as EngineBLS>::prepare_signature(affine_messages.pop().unwrap());
    // TODO: Assess if we could cache normalized message hashes anyplace
    // using interior mutability, but probably this does not work well
    // with our optimization of collecting messages with the same signer.

    // And verify the aggregate signature.
    let prepared = publickeys
        .iter()
        .zip(messages)
        .map(|(pk, m)| {
            (
                <E as EngineBLS>::prepare_public_key(*pk),
                <E as EngineBLS>::prepare_signature(m),
            )
        })
        .collect::<Vec<(_, _)>>();

    E::verify_prepared(signature, prepared.iter())

    //let prepared = publickeys.iter().zip(&messages);
    //S::E::verify_prepared( &signature, prepared )
}

/*
/// Excessively optimized BLS signature verification
///
/// We minimize the number of pairing operations by doing two
/// basis change operations using Gaussian elimination, first in the
/// message space and then in the signer space. As a result, we
/// do only `1 + min(msg_d,pk_d)` pairings where `msg_d` and `pk_d`
/// are the numbers of distinct messages and signers, respectively.
///
/// We expect this to improve performance dramatically when both
/// signers and messages are repeated enough, while simpler strategies
/// work as well or better when, say, messages are distinct.
///
/// Explanation:
///
/// We consider the bipartite graph with vertex sets given by points
/// on the two curves and edges given by desired pairings between them.
/// We let $M$ denote the bipartite adjacency matrix for this graph,
/// so that multiplying $M$ on the right and left by the vectors
/// of messages and signers respectively reproduces our original sum
/// of pairings.
///
/// We first use elementary "row" operations to make $M$ upper
/// triangular, as in Gaussian elimination, but at the cost of also
/// performing one-sided "change of basis" operations that collect
/// our original "basis vectors" into sums of curve points.
/// We next use elementary "column" operations to make $M$ diagonal,
/// again adjusting the basis with curve point operations.
///
/// In this, we regard $M$ as a matrix over the scalar field $F_p$,
/// so we may do row or column swaps and row or column addition
/// operations with small scalars, but not lone row or column scalar
/// multiplications, because these always involve divisions, which
/// produce large curve points that slow us down thereafter.
/// We do not require such divisions because we do not solve any
/// system of equations and do not need ones on the diagonal.
///
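/// As a small example, the pairing sum $(a,x)+(a,y)+(b,y)$ collapses
/// under one such elementary operation to $(a,x)+(a+b,y)$, trading a
/// pairing for a single curve point addition.
///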
/// TODO:
/// We leave implementing this optimization to near future work
/// because it benefits from public keys being affine or having
/// another hashable representation.
///
///
/// As a curiosity, we note one interesting but suboptimal algorithm
/// that avoids small scalar multiplications when doing this:
///
/// If we ignore subtraction, then the minimal number of pairing
/// operations required to verify aggregated BLS signatures is the
/// minimum bipartite edge cover, aka bipartite dimension, of the
/// bipartite graph with vertices given by points on the two curves
/// and edges given by desired pairings.
/// In general, this problem is NP-hard even to approximate.
/// See: https://en.wikipedia.org/wiki/Bipartite_dimension
///
/// There are polynomial time algorithms for bipartite edge cover in
/// special cases, with domino-free graphs being among the widest
/// known classes. See:
/// Amilhastre, Jérôme; Janssen, Philippe; Vilarem, Marie-Catherine,
/// "Computing a minimum biclique cover is polynomial for bipartite domino-free graphs" (1997)
/// https://core.ac.uk/download/pdf/82546650.pdf
///
/// If we now exploit subtraction, then these dominos can be
/// completed into $K_{3,3}$s, like
/// $(a,x)+(a,y)+(b,x)+(b,y)+(b,z)+(c,y)+(c,z) = (a+b+c,x+y+z) - (a,z) - (c,x)$,
/// which looks optimal by itself, likely permits further
/// aggregation, and maybe the subtracted terms can be aggregated later.
///
/// We could not however find the optimal number of pairings by
/// completing dominos like this, because covers like
/// $(a+b+c,x+y+z) - (b,y)$ look optimal by themselves
/// yet involve only one subtraction.
fn verify_with_gaussian_elimination<S: Signed>(s: S) -> bool {
    unimplemented!()
}

*/